#pragma once #include #include #include #include #include #include #include #include #include namespace dml { inline const std::string StatusCodeToString(const dml::status_code code) { switch (code) { case dml::status_code::ok: return "ok"; case dml::status_code::false_predicate: return "false predicate"; case dml::status_code::partial_completion: return "partial completion"; case dml::status_code::nullptr_error: return "nullptr error"; case dml::status_code::bad_size: return "bad size"; case dml::status_code::bad_length: return "bad length"; case dml::status_code::inconsistent_size: return "inconsistent size"; case dml::status_code::dualcast_bad_padding: return "dualcast bad padding"; case dml::status_code::bad_alignment: return "bad alignment"; case dml::status_code::buffers_overlapping: return "buffers overlapping"; case dml::status_code::delta_delta_empty: return "delta delta empty"; case dml::status_code::batch_overflow: return "batch overflow"; case dml::status_code::execution_failed: return "execution failed"; case dml::status_code::unsupported_operation: return "unsupported operation"; case dml::status_code::queue_busy: return "queue busy"; case dml::status_code::error: return "unknown error"; case dml::status_code::config_error: return "config error"; default: return "unhandled error"; } } } namespace dsacache { constexpr bool WAIT_WEAK = true; class Cache; /* * Class Description: * Holds all required information on one cache entry and is used * both internally by the Cache and externally by the user. * * Important Usage Notes: * The pointer is only updated in WaitOnCompletion() which * therefore must be called by the user at some point in order * to use the cached data. Using this class as T for * std::shared_ptr is not recommended as references are * already counted internally. * * Cache Lifetime: * As long as the instance is referenced, the pointer it stores * is guaranteed to be either nullptr or pointing to a valid copy. * * Implementation Detail: * Performs self-reference counting with a shared atomic integer. * Therefore on creating a copy the reference count is increased * and with the destructor it is deacresed. If the last copy is * destroyed the actual underlying data is freed and all shared * variables deleted. * * Notes on Thread Safety: * Class is thread safe in any possible state and performs * reference counting and deallocation itself entirely atomically. */ class CacheData { public: using dml_handler = dml::handler>; private: static constexpr uint64_t maxptr = 0xffff'ffff'ffff'ffff; // set to false if we do not own the cache pointer bool delete_ = false; // data source and size of the block uint8_t* src_; size_t size_; // global reference counting object std::atomic* active_; // global cache-location pointer std::atomic* cache_; // object-local incomplete cache location pointer // contract: only access when being in sole posession of handlers uint8_t** incomplete_cache_; // dml handler vector pointer which is used // to wait on caching task completion std::atomic* handler_; // deallocates the global cache-location // and invalidates it void Deallocate(); size_t GetSize() const { return size_; } uint8_t* GetSource() const { return src_; } int32_t GetRefCount() const { return active_->load(); } void SetTaskHandlerAndCache(uint8_t* cache, dml_handler* handler); // initializes the class after which it is thread safe // but may only be destroyed safely after setting handlers void Init(); friend Cache; public: CacheData(uint8_t* data, const size_t size); CacheData(const CacheData& other); ~CacheData(); // waits on completion of caching operations // for this task and is safe to be called in // any state of the object void WaitOnCompletion(const bool weak = false); // returns the cache data location for this // instance which is valid as long as the // instance is alive - !!! this may also // yield a nullptr !!! uint8_t* GetDataLocation() const { return cache_->load(); } }; /* * Class Description: * Class will handle access to data through internal copies. * These are obtained via work submission to the Intel DSA which takes * care of asynchronously duplicating the data. The user will define * where these copies lie and which system nodes will perform the copy. * This is done through policy functions set during initialization. * * Placement Policy: * The Placement Policy Function decides on which node a particular * entry is to be placed, given the current executing node and the * data source node and data size. This in turn means that for one * datum, multiple cached copies may exist at one time. * * Cache Lifetime: * When accessing the cache, a CacheData-object will be returned. * As long as this object lives, the pointer which it holds is * guaranteed to be either nullptr or a valid copy. When destroyed * the entry is marked for deletion which is only carried out * when system memory pressure drives an automated cache flush. * * Restrictions: * - Overlapping Pointers may lead to undefined behaviour during * manual cache invalidation which should not be used if you * intend to have these types of pointers * - Cache Invalidation may only be performed manually and gives * no ordering guarantees. Therefore, it is the users responsibility * to ensure that results after invalidation have been generated * using the latest state of data. The cache is best suited * to static data. * * Notes on Thread Safety: * - Cache is completely thread-safe after initialization * - CacheData-class will handle deallocation of data itself by * performing self-reference-counting atomically and only * deallocating if the last reference is destroyed * - The internal cache state has one lock which is either * acquired shared for reading the state (upon accessing an already * cached element) or unique (accessing a new element, flushing, invalidating) * - Waiting on copy completion is done over an atomic-wait in copies * of the original CacheData-instance * - Overall this class may experience performance issues due to the use * of locking (in any configuration), lock contention (worsens with higher * core count, node count and utilization) and atomics (worse in the same * situations as lock contention) * * Improving Performance: * When data is never shared between threads or memory size for the cache is * not an issue you may consider having one Cache-instance per thread and removing * the lock in Cache and modifying the reference counting and waiting mechanisms * of CacheData accordingly (although this is high effort and will yield little due * to the atomics not being shared among cores/nodes). * Otherwise, one Cache-instance per node could also be considered. This will allow * the placement policy function to be barebones and reduces the lock contention and * synchronization impact of the atomic variables. */ class Cache { public: // cache policy is defined as a type here to allow flexible usage of the cacher // given a numa destination node (where the data will be needed), the numa source // node (current location of the data) and the data size, this function should // return optimal cache placement // dst node and returned value can differ if the system, for example, has HBM // attached accessible directly to node n under a different node id m typedef int (CachePolicy)(const int numa_dst_node, const int numa_src_node, const size_t data_size); // copy policy specifies the copy-executing nodes for a given task // which allows flexibility in assignment for optimizing raw throughput // or choosing a conservative usage policy typedef std::vector (CopyPolicy)(const int numa_dst_node, const int numa_src_node, const size_t data_size); private: // mutex for accessing the cache state map // map from [dst-numa-node,map2] // map2 from [data-ptr,cache-structure] struct LockedNodeCacheState { std::shared_mutex cache_mutex_; std::unordered_map node_cache_state_; }; std::unordered_map cache_state_; CachePolicy* cache_policy_function_ = nullptr; CopyPolicy* copy_policy_function_ = nullptr; // function used to submit a copy task on a specific node to the dml // engine on that node - will change the current threads node assignment // to achieve this so take care to restore this dml::handler> ExecuteCopy( const uint8_t* src, uint8_t* dst, const size_t size, const int node ) const; // allocates the required memory on the destination node // and then submits task to the dml library for processing // and attaches the handlers to the cache data structure void SubmitTask(CacheData* task, const int dst_node, const int src_node); // querries the policy functions for the given data and size // to obtain destination cache node, also returns the datas // source node for further usage // output may depend on the calling threads node assignment // as this is set as the "optimal placement" node void GetCacheNode(uint8_t* src, const size_t size, int* OUT_DST_NODE, int* OUT_SRC_NODE) const; // allocates memory of size "size" on the numa node "node" // and returns nullptr if this is not possible, also may // try to flush the cache of the requested node to // alleviate encountered shortage uint8_t* AllocOnNode(const size_t size, const int node); // checks whether the cache contains an entry for // the given data in the given memory node and // returns it, otherwise returns nullptr std::unique_ptr GetFromCache(uint8_t* src, const size_t size, const int dst_node); public: ~Cache(); Cache() = default; Cache(const Cache& other) = delete; // initializes the cache with the two policy functions // only after this is it safe to use in a threaded environment void Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function); // function to perform data access through the cache std::unique_ptr Access(uint8_t* data, const size_t size); // flushes the cache of inactive entries // if node is -1 then the whole cache is // checked and otherwise the specified // node - no checks on node validity void Flush(const int node = -1); // forces out all entries from the // cache and therefore will also "forget" // still-in-use entries, these will still // be properly deleted, but the cache // will be fresh - use for testing void Clear(); void Invalidate(uint8_t* data); }; } inline void dsacache::Cache::Clear() { for (auto& nc : cache_state_) { std::unique_lock lock(nc.second->cache_mutex_); nc.second->node_cache_state_.clear(); } } inline void dsacache::Cache::Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function) { cache_policy_function_ = cache_policy_function; copy_policy_function_ = copy_policy_function; // initialize numa library numa_available(); // obtain all available nodes // and those we may allocate // memory on const int nodes_max = numa_num_configured_nodes(); const bitmask* valid_nodes = numa_get_mems_allowed(); // prepare the cache state with entries // for all given nodes for (int node = 0; node < nodes_max; node++) { if (numa_bitmask_isbitset(valid_nodes, node)) { void* block = numa_alloc_onnode(sizeof(LockedNodeCacheState), node); auto* state = new(block)LockedNodeCacheState; cache_state_.insert({node,state}); } } } inline std::unique_ptr dsacache::Cache::Access(uint8_t* data, const size_t size) { // get destination numa node for the cache int dst_node = -1; int src_node = -1; GetCacheNode(data, size, &dst_node, &src_node); // TODO: at this point it could be beneficial to check whether // TODO: the given destination node is present as an entry // TODO: in the cache state to see if it is valid // check whether the data is already cached std::unique_ptr task = GetFromCache(data, size, dst_node); if (task != nullptr) { return std::move(task); } // at this point the requested data is not present in cache // and we create a caching task for it task = std::make_unique(data, size); { LockedNodeCacheState* local_cache_state = cache_state_[dst_node]; std::unique_lock lock(local_cache_state->cache_mutex_); const auto state = local_cache_state->node_cache_state_.emplace(task->GetSource(), *task); // if state.second is false then no insertion took place // which means that concurrently whith this thread // some other thread must have accessed the same // resource in which case we return the other // threads data cache structure if (!state.second) { return std::move(std::make_unique(state.first->second)); } // initialize the task now for thread safety // as we are now sure that we will submit work // to it and will not delete it beforehand task->Init(); } SubmitTask(task.get(), dst_node, src_node); return std::move(task); } inline uint8_t* dsacache::Cache::AllocOnNode(const size_t size, const int node) { // allocate data on this node and flush the unused parts of the // cache if the operation fails and retry once // TODO: smarter flush strategy could keep some stuff cached // check currently free memory to see if the data fits long long int free_space = 0; numa_node_size64(node, &free_space); if (free_space < size) { // dst node lacks memory space so we flush the cache for this // node hoping to free enough currently unused entries to make // the second allocation attempt successful Flush(node); // re-test by getting the free space and checking again numa_node_size64(node, &free_space); if (free_space < size) { return nullptr; } } uint8_t* dst = reinterpret_cast(numa_alloc_onnode(size, node)); if (dst == nullptr) { return nullptr; } return dst; } inline void dsacache::Cache::SubmitTask(CacheData* task, const int dst_node, const int src_node) { static thread_local int last_node_index = -1; // stores the last node used for the local thread so we can achieve some // load balancing which locally might look like round robin, but considering // that one source thread may see different results for "executing_nodes" with // different sizes, and that multiple threads will submit, in reality we // achieve a "wild-west-style" load balance here uint8_t* dst = AllocOnNode(task->GetSize(), dst_node); if (dst == nullptr) { return; } // querry copy policy function for the nodes available to use for the copy const std::vector executing_nodes = copy_policy_function_(dst_node, src_node, task->GetSize()); // use our load balancing method and determine node for this task last_node_index = ++last_node_index % executing_nodes.size(); const int node = executing_nodes[last_node_index]; // submit the copy and attach it to the task entry auto* handler = new CacheData::dml_handler(); *handler = ExecuteCopy(task->GetSource(), dst, task->GetSize(), node); task->SetTaskHandlerAndCache(dst, handler); } inline dml::handler> dsacache::Cache::ExecuteCopy( const uint8_t* src, uint8_t* dst, const size_t size, const int node ) const { dml::const_data_view srcv = dml::make_view(src, size); dml::data_view dstv = dml::make_view(dst, size); return dml::submit( dml::mem_copy.block_on_fault(), srcv, dstv, dml::execution_interface>(), node ); } inline void dsacache::Cache::GetCacheNode(uint8_t* src, const size_t size, int* OUT_DST_NODE, int* OUT_SRC_NODE) const { // obtain numa node of current thread to determine where the data is needed const int current_cpu = sched_getcpu(); const int current_node = numa_node_of_cpu(current_cpu); // obtain node that the given data pointer is allocated on *OUT_SRC_NODE = -1; get_mempolicy(OUT_SRC_NODE, NULL, 0, (void*)src, MPOL_F_NODE | MPOL_F_ADDR); // querry cache policy function for the destination numa node *OUT_DST_NODE = cache_policy_function_(current_node, *OUT_SRC_NODE, size); } inline void dsacache::Cache::Flush(const int node) { // this lambda is used because below we have two code paths that // flush nodes, either one single or all successively const auto FlushNode = [](std::unordered_map& map) { // begin at the front of the map auto it = map.begin(); // loop until we reach the end of the map while (it != map.end()) { // if the iterator points to an inactive element // then we may erase it if (it->second.GetRefCount() <= 1) { // erase the iterator from the map map.erase(it); // as the erasure invalidated out iterator // we must start at the beginning again it = map.begin(); } else { // if element is active just move over to the next one it++; } } }; // we require exclusive lock as we modify the cache state // node == -1 means that cache on all nodes should be flushed if (node == -1) { for (auto& nc : cache_state_) { std::unique_lock lock(nc.second->cache_mutex_); FlushNode(nc.second->node_cache_state_); } } else { std::unique_lock lock(cache_state_[node]->cache_mutex_); FlushNode(cache_state_[node]->node_cache_state_); } } inline std::unique_ptr dsacache::Cache::GetFromCache(uint8_t* src, const size_t size, const int dst_node) { // the best situation is if this data is already cached // which we check in an unnamed block in which the cache // is locked for reading to prevent another thread // from marking the element we may find as unused and // clearing it LockedNodeCacheState* local_cache_state = cache_state_[dst_node]; // lock the cache state in shared-mode because we read std::shared_lock lock(local_cache_state->cache_mutex_); // search for the data in our cache state structure at the given node const auto search = local_cache_state->node_cache_state_.find(src); // if the data is in our structure we continue if (search != local_cache_state->node_cache_state_.end()) { // now check whether the sizes match if (search->second.GetSize() >= size) { // return a unique copy of the entry which uses the object // lifetime and destructor to safely handle deallocation return std::move(std::make_unique(search->second)); } else { // if the sizes missmatch then we clear the current entry from cache // which will cause its deletion only after the last possible outside // reference is also destroyed local_cache_state->node_cache_state_.erase(search); } } return nullptr; } void dsacache::Cache::Invalidate(uint8_t* data) { // as the cache is modified we must obtain a unique writers lock // loop through all per-node-caches available for (auto node : cache_state_) { std::unique_lock lock(node.second->cache_mutex_); // search for an entry for the given data pointer auto search = node.second->node_cache_state_.find(data); if (search != node.second->node_cache_state_.end()) { // if the data is represented in-cache // then it will be erased to re-trigger // caching on next access node.second->node_cache_state_.erase(search); } } } inline dsacache::Cache::~Cache() { for (auto node : cache_state_) { node.second->~LockedNodeCacheState(); numa_free(reinterpret_cast(node.second), sizeof(LockedNodeCacheState)); } } inline dsacache::CacheData::CacheData(uint8_t* data, const size_t size) { src_ = data; size_ = size; delete_ = false; active_ = new std::atomic(1); cache_ = new std::atomic(data); handler_ = new std::atomic(nullptr); incomplete_cache_ = new uint8_t*(nullptr); } inline dsacache::CacheData::CacheData(const dsacache::CacheData& other) { // we copy the ptr to the global atomic reference counter // and increase the amount of active references active_ = other.active_; const int current_active = active_->fetch_add(1); src_ = other.src_; size_ = other.size_; cache_ = other.cache_; incomplete_cache_ = other.incomplete_cache_; handler_ = other.handler_; } inline dsacache::CacheData::~CacheData() { // due to fetch_sub returning the preivously held value // we must subtract one locally to get the current value const int32_t v = active_->fetch_sub(1) - 1; // if the returned value is zero or lower // then we must execute proper deletion // as this was the last reference if (v == 0) { // on deletion we must ensure that all offloaded // operations have completed successfully WaitOnCompletion(); // only then can we deallocate the memory Deallocate(); delete active_; delete cache_; delete handler_; delete incomplete_cache_; } } inline void dsacache::CacheData::Deallocate() { // although deallocate should only be called from // a safe context to do so, it can not hurt to // defensively perform the operation atomically // and check for incomplete cache if no deallocation // takes place for the retrieved local cache uint8_t* cache_local = cache_->exchange(nullptr); if (cache_local != nullptr && delete_) numa_free(cache_local, size_); else if (*incomplete_cache_ != nullptr) numa_free(*incomplete_cache_, size_); else; } inline void dsacache::CacheData::WaitOnCompletion(const bool weak) { // first check if waiting is even neccessary as a valid // cache pointer signals that no waiting is to be performed if (cache_->load() != nullptr) { return; } // then check if the handlers are available handler_->wait(nullptr); // exchange the global handlers pointer with nullptr to have a local // copy - this signals that this thread is the sole owner and therefore // responsible for waiting for them. we can not set to nullptr here but // set to maximum of 64-bit in order to prevent deadlocks from the above // waiting construct dml_handler* local_handler = handler_->exchange(reinterpret_cast(maxptr)); // ensure that no other thread snatched the handlers before us // and in case one did, wait again and then return if (local_handler == nullptr || local_handler == reinterpret_cast(maxptr)) { cache_->wait(nullptr); return; } // at this point we are responsible for waiting for the handlers // and handling any error that comes through them gracefully if (weak && !local_handler->is_finished()) { handler_->store(local_handler); return; } // perform the wait auto result = local_handler->get(); // at this point handlers has been waited for // and therefore may be decomissioned delete local_handler; // if the copy tasks failed we abort the whole task // otherwise the cache will be set to valid now if (result.status != dml::status_code::ok) { cache_->store(src_); numa_free(*incomplete_cache_, size_); delete_ = false; *incomplete_cache_ = nullptr; } else { cache_->store(*incomplete_cache_); } // notify all waiting threads so they wake up quickly cache_->notify_all(); handler_->notify_all(); } void dsacache::CacheData::SetTaskHandlerAndCache(uint8_t* cache, dml_handler* handler) { *incomplete_cache_ = cache; handler_->store(handler); handler_->notify_one(); } void dsacache::CacheData::Init() { cache_->store(nullptr); delete_ = true; }