@@ -195,9 +195,24 @@ namespace dsacache {
    // checked and otherwise the specified
    // node - no checks on node validity
    void Flush(const int node = -1);

    // forces out all entries from the
    // cache and therefore will also "forget"
    // still-in-use entries, these will still
    // be properly deleted, but the cache
    // will be fresh - use for testing
    void Clear();
};

}

inline void dsacache::Cache::Clear() {
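    // take an exclusive lock on the cache mutex so that no other
    // thread can read or modify the cache state while it is reset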
    std::unique_lock<std::shared_mutex> lock(cache_mutex_);

    cache_state_.clear();
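
    // re-run initialization with the previously configured policy
    // functions so that the per-node state is recreated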
    Init(cache_policy_function_, copy_policy_function_);
}

inline void dsacache::Cache::Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function) {
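    // the policy functions are stored as members so that Clear()
    // can later re-initialize the cache with the same configuration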
    cache_policy_function_ = cache_policy_function;
    copy_policy_function_ = copy_policy_function;

@@ -221,8 +236,6 @@ inline void dsacache::Cache::Init(CachePolicy* cache_policy_function, CopyPolicy
            cache_state_.insert({node,{}});
        }
    }

    std::cout << "[-] Cache Initialized" << std::endl;
}

inline std::unique_ptr<dsacache::CacheData> dsacache::Cache::Access(uint8_t* data, const size_t size) {

@@ -314,9 +327,6 @@ inline uint8_t* dsacache::Cache::AllocOnNode(const size_t size, const int node)
}

inline void dsacache::Cache::SubmitTask(CacheData* task, const int dst_node, const int src_node) {
    std::cout << "[+] Allocating " << task->size_ << "B on node " << dst_node << " for " << std::hex << (uint64_t)task->src_ << std::dec << std::endl;
    uint8_t* dst = AllocOnNode(task->size_, dst_node);

    if (dst == nullptr) {

@@ -338,8 +348,6 @@ inline void dsacache::Cache::SubmitTask(CacheData* task, const int dst_node, con
    const size_t size = task->size_ / task_count;
    const size_t last_size = size + task->size_ % task_count;
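
    // the remainder of the division is handled by the last task
    // so that the full source region is covered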

    std::cout << "[-] Splitting Copy into " << task_count << " tasks of " << size << "B 0x" << std::hex << (uint64_t)task->src_ << std::dec << std::endl;

    // save the current numa node mask to restore later
    // as executing the copy task will place this thread
    // on a different node

@@ -389,8 +397,6 @@ inline void dsacache::Cache::GetCacheNode(uint8_t* src, const size_t size, int*
}

inline void dsacache::Cache::Flush(const int node) {
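    // node == -1 flushes the entries of all nodes, otherwise only
    // the entries cached on the given node are flushed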
    std::cout << "[-] Flushing Cache for " << (node == -1 ? "all nodes" : "node " + std::to_string(node)) << std::endl;

    // this lambda is used because below we have two code paths that
    // flush nodes, either one single or all successively

@@ -463,16 +469,12 @@ inline std::unique_ptr<dsacache::CacheData> dsacache::Cache::GetFromCache(uint8_
    // now check whether the cached entry is large enough for the requested size

    if (search->second.size_ >= size) {
        std::cout << "[+] Found Cached version for 0x" << std::hex << (uint64_t)src << std::dec << std::endl;

        // return a unique copy of the entry which uses the object
        // lifetime and destructor to safely handle deallocation

        return std::make_unique<CacheData>(search->second);
    }
    else {
        std::cout << "[!] Found Cached version with size mismatch for 0x" << std::hex << (uint64_t)src << std::dec << std::endl;

        // if the sizes mismatch then we clear the current entry from cache
        // which will cause its deletion only after the last possible outside
        // reference is also destroyed

@@ -485,8 +487,6 @@ inline std::unique_ptr<dsacache::CacheData> dsacache::Cache::GetFromCache(uint8_
}

inline dsacache::CacheData::CacheData(uint8_t* data, const size_t size) {
    std::cout << "[-] New CacheData 0x" << std::hex << (uint64_t)data << std::dec << std::endl;

    src_ = data;
    size_ = size;
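
    // the reference counter is heap-allocated so that every copy of
    // this CacheData can share it; the count starts at one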
    active_ = new std::atomic<int32_t>(1);

@@ -496,8 +496,6 @@ inline dsacache::CacheData::CacheData(uint8_t* data, const size_t size) {
}

inline dsacache::CacheData::CacheData(const dsacache::CacheData& other) {
    std::cout << "[-] Copy Created for CacheData 0x" << std::hex << (uint64_t)other.src_ << std::dec << std::endl;

    // we copy the ptr to the global atomic reference counter
    // and increase the amount of active references

@@ -521,8 +519,6 @@ inline dsacache::CacheData::CacheData(const dsacache::CacheData& other) {
}

inline dsacache::CacheData::~CacheData() {
    std::cout << "[-] Destructor for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;

    // if this is the first instance of this cache structure
    // and it has not been waited on and is now being destroyed
    // we must wait on completion here to ensure the cache

@@ -542,8 +538,6 @@ inline dsacache::CacheData::~CacheData() {
    // as this was the last reference

    if (v <= 0) {
        std::cout << "[!] Full Destructor for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;
        Deallocate();

        delete active_;

@@ -552,8 +546,6 @@ inline dsacache::CacheData::~CacheData() {
}

inline void dsacache::CacheData::Deallocate() {
    std::cout << "[!] Deallocating for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;

    // although deallocate should only be called from
    // a safe context to do so, it can not hurt to
    // defensively perform the operation atomically

@@ -582,23 +574,17 @@ inline void dsacache::CacheData::WaitOnCompletion() {
    // are non-null or it is not

    if (handlers_ == nullptr) {
        std::cout << "[-] Waiting on cache-var-update for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;

        // when no handlers are attached to this cache entry we wait on a
        // value change for the cache structure from nullptr to non-null
        // which will either go through immediately if the cache is valid
        // already or wait until the handler-owning thread notifies us
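        // (the wait below relies on the atomic wait/notify support
        // introduced with C++20)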

        cache_->wait(nullptr);

        std::cout << "[+] Finished waiting on cache-var-update for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;
    }
    else {
        // when the handlers are non-null there are some DSA task handlers
        // available on which we must wait here

        std::cout << "[-] Waiting on handlers for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;

        // abort is set if any operation encountered an error

        bool abort = false;

@@ -637,8 +623,6 @@ inline void dsacache::CacheData::WaitOnCompletion() {
            numa_free(incomplete_cache_, size_);
        }
        else {
            std::cout << "[+] Finished waiting on handlers for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;

            // incomplete cache is now safe to use and therefore we
            // swap it with the global cache state of this entry
            // and notify potentially waiting threads