This repository contains my bachelor's thesis, the associated TeX files, code snippets, and maybe more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator
#pragma once

#include <iostream>
#include <string>
#include <vector>
#include <atomic>
#include <unordered_map>
#include <shared_mutex>
#include <mutex>
#include <memory>
#include <new>
#include <cstdint>
#include <cstddef>

#include <sched.h>
#include <numa.h>
#include <numaif.h>

#include <dml/dml.hpp>

namespace dml {
    inline std::string StatusCodeToString(const dml::status_code code) {
        switch (code) {
            case dml::status_code::ok: return "ok";
            case dml::status_code::false_predicate: return "false predicate";
            case dml::status_code::partial_completion: return "partial completion";
            case dml::status_code::nullptr_error: return "nullptr error";
            case dml::status_code::bad_size: return "bad size";
            case dml::status_code::bad_length: return "bad length";
            case dml::status_code::inconsistent_size: return "inconsistent size";
            case dml::status_code::dualcast_bad_padding: return "dualcast bad padding";
            case dml::status_code::bad_alignment: return "bad alignment";
            case dml::status_code::buffers_overlapping: return "buffers overlapping";
            case dml::status_code::delta_delta_empty: return "delta delta empty";
            case dml::status_code::batch_overflow: return "batch overflow";
            case dml::status_code::execution_failed: return "execution failed";
            case dml::status_code::unsupported_operation: return "unsupported operation";
            case dml::status_code::queue_busy: return "queue busy";
            case dml::status_code::error: return "unknown error";
            case dml::status_code::config_error: return "config error";
            default: return "unhandled error";
        }
    }
}

namespace dsacache {
    inline bool CheckFlag(const uint64_t value, const uint64_t flag) {
        return (value & flag) != 0;
    }

    inline uint64_t UnsetFlag(const uint64_t value, const uint64_t flag) {
        return value & (~flag);
    }

    inline uint64_t SetFlag(const uint64_t value, const uint64_t flag) {
        return value | flag;
    }

    constexpr uint64_t FLAG_WAIT_WEAK = 0b1ULL << 63;
    constexpr uint64_t FLAG_HANDLE_PF = 0b1ULL << 62;
    constexpr uint64_t FLAG_ACCESS_WEAK = 0b1ULL << 61;
    constexpr uint64_t FLAG_DEFAULT = 0ULL;
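    // usage sketch for the flag helpers above (illustrative only, not part
    // of the interface): compose option bits before handing them to SetFlags()
    //
    //     uint64_t flags = FLAG_DEFAULT;
    //     flags = SetFlag(flags, FLAG_HANDLE_PF);    // let the DSA block on page faults
    //     if (CheckFlag(flags, FLAG_HANDLE_PF)) { /* enabled */ }
    //     flags = UnsetFlag(flags, FLAG_HANDLE_PF);  // back to default behaviour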
    class Cache;

    /*
    * Class Description:
    * Holds all required information on one cache entry and is used
    * both internally by the Cache and externally by the user.
    *
    * Important Usage Notes:
    * The pointer is only updated in WaitOnCompletion() which
    * therefore must be called by the user at some point in order
    * to use the cached data. Using this class as T for
    * std::shared_ptr<T> is not recommended as references are
    * already counted internally.
    *
    * Cache Lifetime:
    * As long as the instance is referenced, the pointer it stores
    * is guaranteed to be either nullptr or pointing to a valid copy.
    *
    * Implementation Detail:
    * Performs self-reference counting with a shared atomic integer.
    * Therefore, on creating a copy the reference count is increased,
    * and in the destructor it is decreased. If the last copy is
    * destroyed, the actual underlying data is freed and all shared
    * variables are deleted.
    *
    * Notes on Thread Safety:
    * Class is thread safe in any possible state and performs
    * reference counting and deallocation itself entirely atomically.
    */

    class CacheData {
    public:
        using dml_handler = dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>>;

    private:
        static constexpr uint64_t maxptr = 0xffff'ffff'ffff'ffff;

        // set to false if we do not own the cache pointer
        bool delete_ = false;

        // data source and size of the block
        uint8_t* src_;
        size_t size_;

        // global reference counting object
        std::atomic<int32_t>* active_;

        // global cache-location pointer
        std::atomic<uint8_t*>* cache_;

        // object-local incomplete cache location pointer
        // contract: only access when in sole possession of the handlers
        uint8_t** incomplete_cache_;

        // flags inherited from the parent cache
        uint64_t flags_ = 0;

        // dml handler vector pointer which is used
        // to wait on caching task completion
        std::atomic<std::vector<dml_handler>*>* handlers_;

        // deallocates the global cache-location
        // and invalidates it
        void Deallocate();

        size_t GetSize() const { return size_; }
        uint8_t* GetSource() const { return src_; }
        int32_t GetRefCount() const { return active_->load(); }
        void SetCacheToSource() { cache_->store(src_); delete_ = false; }

        void SetTaskHandlersAndCache(uint8_t* cache, std::vector<dml_handler>* handlers);

        // initializes the class, after which it is thread safe,
        // but it may only be destroyed safely after setting handlers
        void Init();

        friend Cache;

    public:
        CacheData(uint8_t* data, const size_t size);
        CacheData(const CacheData& other);
        ~CacheData();

        // waits on completion of caching operations
        // for this task and is safe to be called in
        // any state of the object
        void WaitOnCompletion();

        // returns the cache data location for this
        // instance which is valid as long as the
        // instance is alive - !!! this may also
        // yield a nullptr !!!
        uint8_t* GetDataLocation() const { return cache_->load(); }

        void SetFlags(const uint64_t flags) { flags_ = flags; }
        uint64_t GetFlags() const { return flags_; }
    };

    /*
    * Class Description:
    * This class handles access to data through internal copies.
    * These are obtained via work submission to the Intel DSA, which takes
    * care of asynchronously duplicating the data. The user defines
    * where these copies lie and which system nodes perform the copy.
    * This is done through policy functions set during initialization.
    *
    * Placement Policy:
    * The Placement Policy Function decides on which node a particular
    * entry is to be placed, given the current executing node and the
    * data source node and data size. This in turn means that for one
    * datum, multiple cached copies may exist at one time.
    *
    * Cache Lifetime:
    * When accessing the cache, a CacheData-object will be returned.
    * As long as this object lives, the pointer which it holds is
    * guaranteed to be either nullptr or a valid copy. When destroyed,
    * the entry is marked for deletion, which is only carried out
    * when system memory pressure drives an automated cache flush.
    *
    * Restrictions:
    * - Overlapping pointers may lead to undefined behaviour during
    *   manual cache invalidation, which should not be used if you
    *   intend to have these types of pointers
    * - Cache invalidation may only be performed manually and gives
    *   no ordering guarantees. Therefore, it is the user's responsibility
    *   to ensure that results after invalidation have been generated
    *   using the latest state of data. The cache is best suited
    *   to static data.
    *
    * Notes on Thread Safety:
    * - Cache is completely thread-safe after initialization
    * - CacheData will handle deallocation of data itself by
    *   performing self-reference-counting atomically and only
    *   deallocating if the last reference is destroyed
    * - The internal cache state has one lock which is either
    *   acquired shared for reading the state (upon accessing an already
    *   cached element) or unique (accessing a new element, flushing, invalidating)
    * - Waiting on copy completion is done over an atomic-wait in copies
    *   of the original CacheData-instance
    * - Overall this class may experience performance issues due to the use
    *   of locking (in any configuration), lock contention (worsens with higher
    *   core count, node count and utilization) and atomics (worse in the same
    *   situations as lock contention)
    *
    * Improving Performance:
    * When data is never shared between threads, or memory size for the cache is
    * not an issue, you may consider having one Cache-instance per thread, removing
    * the lock in Cache and modifying the reference counting and waiting mechanisms
    * of CacheData accordingly (although this is high effort and will yield little,
    * as the atomics are then not shared among cores/nodes).
    * Otherwise, one Cache-instance per node could also be considered. This will allow
    * the placement policy function to be barebones and reduces the lock contention and
    * synchronization impact of the atomic variables.
    */

    class Cache {
    public:
        // the cache policy is defined as a type here to allow flexible usage of the cacher
        // given a numa destination node (where the data will be needed), the numa source
        // node (current location of the data) and the data size, this function should
        // return the optimal cache placement
        // dst node and returned value can differ if the system, for example, has HBM
        // attached accessible directly to node n under a different node id m
        typedef int (CachePolicy)(const int numa_dst_node, const int numa_src_node, const size_t data_size);

        // the copy policy specifies the copy-executing nodes for a given task,
        // which allows flexibility in assignment for optimizing raw throughput
        // or choosing a conservative usage policy
        typedef std::vector<int> (CopyPolicy)(const int numa_dst_node, const int numa_src_node, const size_t data_size);
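        // a minimal sketch of matching policy functions (illustrative only,
        // not part of this class): always place the cache on the destination
        // node and let a single engine on that node perform the copy
        //
        //     int SimpleCachePolicy(const int dst, const int src, const size_t size) {
        //         return dst;
        //     }
        //     std::vector<int> SimpleCopyPolicy(const int dst, const int src, const size_t size) {
        //         return { dst };
        //     }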
    private:
        // flags to store cache-wide options
        uint64_t flags_ = 0;

        // map from [dst-numa-node] to a per-node state which in turn
        // maps from [data-ptr] to the associated cache structure
        struct LockedNodeCacheState {
            std::shared_mutex cache_mutex_;
            std::unordered_map<uint8_t*, CacheData> node_cache_state_;
        };
        std::unordered_map<uint8_t, LockedNodeCacheState*> cache_state_;

        CachePolicy* cache_policy_function_ = nullptr;
        CopyPolicy* copy_policy_function_ = nullptr;

        // function used to submit a copy task on a specific node to the dml
        // engine on that node - will change the current thread's node assignment
        // to achieve this, so take care to restore this afterwards
        dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>> ExecuteCopy(
            const uint8_t* src, uint8_t* dst, const size_t size, const int node
        ) const;

        // allocates the required memory on the destination node,
        // then submits the task to the dml library for processing
        // and attaches the handlers to the cache data structure
        void SubmitTask(CacheData* task, const int dst_node, const int src_node);

        // queries the policy functions for the given data and size
        // to obtain the destination cache node, and also returns the
        // data's source node for further usage
        // output may depend on the calling thread's node assignment
        // as this is set as the "optimal placement" node
        void GetCacheNode(uint8_t* src, const size_t size, int* OUT_DST_NODE, int* OUT_SRC_NODE) const;

        // allocates memory of size "size" on the numa node "node"
        // and returns nullptr if this is not possible; it may also
        // try to flush the cache of the requested node to
        // alleviate an encountered shortage
        uint8_t* AllocOnNode(const size_t size, const int node);

        // checks whether the cache contains an entry for
        // the given data on the given memory node and
        // returns it, otherwise returns nullptr
        std::unique_ptr<CacheData> GetFromCache(uint8_t* src, const size_t size, const int dst_node);

    public:
        ~Cache();
        Cache() = default;
        Cache(const Cache& other) = delete;

        // initializes the cache with the two policy functions;
        // only after this is it safe to use in a threaded environment
        void Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function);

        // function to perform data access through the cache; behaviour depends
        // on flags: by default it will also perform a prefetch, while with
        // FLAG_ACCESS_WEAK set it will not prefetch and instead return
        // a cache entry with the data source as cache location on cache miss;
        // this flag must be set for each invocation, the flags set for the
        // entire cache will not be evaluated for this
        std::unique_ptr<CacheData> Access(uint8_t* data, const size_t size, const uint64_t flags = FLAG_DEFAULT);

        // flushes the cache of inactive entries:
        // if node is -1 then the whole cache is
        // checked, otherwise only the specified
        // node - no checks on node validity
        void Flush(const int node = -1);

        // forces out all entries from the
        // cache and therefore will also "forget"
        // still-in-use entries; these will still
        // be properly deleted, but the cache
        // will be fresh - use for testing
        void Clear();

        void Invalidate(uint8_t* data);

        void SetFlags(const uint64_t flags) { flags_ = flags; }
        uint64_t GetFlags() { return flags_; }
    };
}

inline void dsacache::Cache::Clear() {
    for (auto& nc : cache_state_) {
        std::unique_lock<std::shared_mutex> lock(nc.second->cache_mutex_);
        nc.second->node_cache_state_.clear();
    }
}

inline void dsacache::Cache::Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function) {
    cache_policy_function_ = cache_policy_function;
    copy_policy_function_ = copy_policy_function;

    // initialize the numa library
    numa_available();

    // obtain all available nodes
    // and those we may allocate memory on
    const int nodes_max = numa_num_configured_nodes();
    const bitmask* valid_nodes = numa_get_mems_allowed();

    // prepare the cache state with entries for all given nodes
    for (int node = 0; node < nodes_max; node++) {
        if (numa_bitmask_isbitset(valid_nodes, node)) {
            void* block = numa_alloc_onnode(sizeof(LockedNodeCacheState), node);
            auto* state = new(block) LockedNodeCacheState;
            cache_state_.insert({node, state});
        }
    }
}

inline std::unique_ptr<dsacache::CacheData> dsacache::Cache::Access(uint8_t* data, const size_t size, const uint64_t flags) {
    // get the destination numa node for the cache
    int dst_node = -1;
    int src_node = -1;
    GetCacheNode(data, size, &dst_node, &src_node);

    // check whether the data is already cached
    std::unique_ptr<CacheData> task = GetFromCache(data, size, dst_node);
    if (task != nullptr) {
        return task;
    }

    // at this point the requested data is not present in the cache
    // and we create a caching task for it, copying our current flags
    task = std::make_unique<CacheData>(data, size);
    task->SetFlags(flags_);

    // when the ACCESS_WEAK flag is set for the flags parameter (!)
    // and we have reached this point, there was no cache entry
    // present for the requested data and therefore we abort,
    // but to keep validity we return the previously created
    // CacheData struct, setting the cache variable to the
    // data source location
    if (CheckFlag(flags, FLAG_ACCESS_WEAK)) {
        std::cerr << "[!] CacheAccess with WEAK set encountered miss!" << std::endl;
        task->SetCacheToSource();
        return task;
    }

    // the following operation adds the task to the cache state
    // which requires unique locking of the current node's entry
    {
        LockedNodeCacheState* local_cache_state = cache_state_[dst_node];

        std::unique_lock<std::shared_mutex> lock(local_cache_state->cache_mutex_);
        const auto state = local_cache_state->node_cache_state_.emplace(task->GetSource(), *task);

        // if state.second is false then no insertion took place,
        // which means that, concurrently with this thread,
        // some other thread must have accessed the same
        // resource, in which case we return the other
        // thread's data cache structure
        if (!state.second) {
            return std::make_unique<CacheData>(state.first->second);
        }

        // initialize the task now for thread safety,
        // as we are now sure that we will submit work
        // to it and will not delete it beforehand
        task->Init();
    }

    SubmitTask(task.get(), dst_node, src_node);

    return task;
}

inline uint8_t* dsacache::Cache::AllocOnNode(const size_t size, const int node) {
    // allocate data on this node; flush the unused parts of the
    // cache if the pre-check fails and then retry once
    // TODO: a smarter flush strategy could keep some entries cached

    // check currently free memory to see if the data fits
    long long int free_space = 0;
    numa_node_size64(node, &free_space);

    if (free_space < static_cast<long long int>(size)) {
        // the dst node lacks memory space, so we flush the cache for this
        // node, hoping to free enough currently unused entries to make
        // the second allocation attempt successful
        Flush(node);

        // re-test by getting the free space and checking again
        numa_node_size64(node, &free_space);
        if (free_space < static_cast<long long int>(size)) {
            return nullptr;
        }
    }

    // numa_alloc_onnode returns nullptr on failure, which we pass through
    return reinterpret_cast<uint8_t*>(numa_alloc_onnode(size, node));
}

inline void dsacache::Cache::SubmitTask(CacheData* task, const int dst_node, const int src_node) {
    uint8_t* dst = AllocOnNode(task->GetSize(), dst_node);
    if (dst == nullptr) {
        return;
    }

    // query the copy policy function for the nodes to use for the copy
    const std::vector<int> executing_nodes = copy_policy_function_(dst_node, src_node, task->GetSize());
    const size_t task_count = executing_nodes.size();

    // each task will copy one fair share of the total size,
    // and in case the total size is not a multiple of the
    // task count, the last node must copy the remainder
    const size_t size = task->GetSize() / task_count;
    const size_t last_size = size + task->GetSize() % task_count;
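    // e.g. a copy of 10 bytes across 3 nodes yields chunks of 3, 3 and 4 bytes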
    // collect the dml handlers for all copy subtasks so that
    // they can later be attached to the cache entry for waiting
    auto handlers = new std::vector<CacheData::dml_handler>();

    for (uint32_t i = 0; i < task_count; i++) {
        // the last subtask copies its fair share plus the remainder
        const size_t local_size = i + 1 == task_count ? last_size : size;
        const size_t local_offset = i * size;
        const uint8_t* local_src = task->GetSource() + local_offset;
        uint8_t* local_dst = dst + local_offset;

        handlers->emplace_back(ExecuteCopy(local_src, local_dst, local_size, executing_nodes[i]));
    }

    task->SetTaskHandlersAndCache(dst, handlers);
}

inline dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>> dsacache::Cache::ExecuteCopy(
    const uint8_t* src, uint8_t* dst, const size_t size, const int node
) const {
    dml::const_data_view srcv = dml::make_view(src, size);
    dml::data_view dstv = dml::make_view(dst, size);

    if (CheckFlag(flags_, FLAG_HANDLE_PF)) {
        return dml::submit<dml::hardware>(
            dml::mem_copy.block_on_fault(), srcv, dstv,
            dml::execution_interface<dml::hardware, std::allocator<uint8_t>>(), node
        );
    }
    else {
        return dml::submit<dml::hardware>(
            dml::mem_copy, srcv, dstv,
            dml::execution_interface<dml::hardware, std::allocator<uint8_t>>(), node
        );
    }
}

inline void dsacache::Cache::GetCacheNode(uint8_t* src, const size_t size, int* OUT_DST_NODE, int* OUT_SRC_NODE) const {
    // obtain the numa node of the current thread to determine where the data is needed
    const int current_cpu = sched_getcpu();
    const int current_node = numa_node_of_cpu(current_cpu);

    // obtain the node that the given data pointer is allocated on
    *OUT_SRC_NODE = -1;
    get_mempolicy(OUT_SRC_NODE, NULL, 0, (void*)src, MPOL_F_NODE | MPOL_F_ADDR);

    // query the cache policy function for the destination numa node
    *OUT_DST_NODE = cache_policy_function_(current_node, *OUT_SRC_NODE, size);
}

inline void dsacache::Cache::Flush(const int node) {
    // this lambda is used because below we have two code paths that
    // flush nodes, either one single node or all of them successively
    const auto FlushNode = [](std::unordered_map<uint8_t*, CacheData>& map) {
        // begin at the front of the map
        auto it = map.begin();

        // loop until we reach the end of the map
        while (it != map.end()) {
            // if the iterator points to an inactive element
            // then we may erase it
            if (it->second.GetRefCount() <= 1) {
                // erase the iterator from the map
                map.erase(it);

                // as the erasure invalidated our iterator
                // we must start at the beginning again
                it = map.begin();
            }
            else {
                // if the element is active just move on to the next one
                it++;
            }
        }
    };

    // we require an exclusive lock as we modify the cache state
    // node == -1 means that the cache on all nodes should be flushed
    if (node == -1) {
        for (auto& nc : cache_state_) {
            std::unique_lock<std::shared_mutex> lock(nc.second->cache_mutex_);
            FlushNode(nc.second->node_cache_state_);
        }
    }
    else {
        std::unique_lock<std::shared_mutex> lock(cache_state_[node]->cache_mutex_);
        FlushNode(cache_state_[node]->node_cache_state_);
    }
}

inline std::unique_ptr<dsacache::CacheData> dsacache::Cache::GetFromCache(uint8_t* src, const size_t size, const int dst_node) {
    // the best situation is if this data is already cached,
    // which we check under a shared lock on the cache state
    // to prevent another thread from marking the element we
    // may find as unused and clearing it
    LockedNodeCacheState* local_cache_state = cache_state_[dst_node];

    // lock the cache state in shared mode because we only read
    std::shared_lock<std::shared_mutex> lock(local_cache_state->cache_mutex_);

    // search for the data in our cache state structure at the given node
    const auto search = local_cache_state->node_cache_state_.find(src);

    // if the data is in our structure we continue
    if (search != local_cache_state->node_cache_state_.end()) {
        // check whether the cached entry is at least as large as requested
        if (search->second.GetSize() >= size) {
            // return a unique copy of the entry which uses the object
            // lifetime and destructor to safely handle deallocation
            return std::make_unique<CacheData>(search->second);
        }
        else {
            // if the sizes mismatch then we remove the current entry from the
            // cache, which will cause its deletion only after the last possible
            // outside reference is also destroyed
            local_cache_state->node_cache_state_.erase(search);
        }
    }

    return nullptr;
}

inline void dsacache::Cache::Invalidate(uint8_t* data) {
    // as the cache is modified we must obtain a unique writer's lock

    // loop through all per-node caches available
    for (auto& node : cache_state_) {
        std::unique_lock<std::shared_mutex> lock(node.second->cache_mutex_);

        // search for an entry for the given data pointer
        auto search = node.second->node_cache_state_.find(data);

        if (search != node.second->node_cache_state_.end()) {
            // if the data is represented in-cache
            // then it will be erased to re-trigger
            // caching on the next access
            node.second->node_cache_state_.erase(search);
        }
    }
}

inline dsacache::Cache::~Cache() {
    for (auto& node : cache_state_) {
        node.second->~LockedNodeCacheState();
        numa_free(reinterpret_cast<void*>(node.second), sizeof(LockedNodeCacheState));
    }
}

inline dsacache::CacheData::CacheData(uint8_t* data, const size_t size) {
    src_ = data;
    size_ = size;
    delete_ = false;
    active_ = new std::atomic<int32_t>(1);
    cache_ = new std::atomic<uint8_t*>(data);
    handlers_ = new std::atomic<std::vector<dml_handler>*>();
    incomplete_cache_ = new uint8_t*(nullptr);
}

inline dsacache::CacheData::CacheData(const dsacache::CacheData& other) {
    // we copy the pointer to the global atomic reference counter
    // and increase the number of active references
    active_ = other.active_;
    active_->fetch_add(1);

    src_ = other.src_;
    size_ = other.size_;
    cache_ = other.cache_;
    flags_ = other.flags_;
    incomplete_cache_ = other.incomplete_cache_;
    handlers_ = other.handlers_;
}

inline dsacache::CacheData::~CacheData() {
    // as fetch_sub returns the previously held value
    // we must subtract one locally to get the current value
    const int32_t v = active_->fetch_sub(1) - 1;

    // if the resulting value is zero
    // then we must execute proper deletion
    // as this was the last reference
    if (v == 0) {
        // on deletion we must ensure that all offloaded
        // operations have completed successfully
        WaitOnCompletion();

        // only then can we deallocate the memory
        Deallocate();

        delete active_;
        delete cache_;
        delete handlers_;
        delete incomplete_cache_;
    }
}

inline void dsacache::CacheData::Deallocate() {
    // although Deallocate should only be called from
    // a safe context, it can not hurt to defensively
    // perform the operation atomically and to check
    // for an incomplete cache if no deallocation
    // takes place for the retrieved local cache
    uint8_t* cache_local = cache_->exchange(nullptr);

    if (cache_local != nullptr && delete_) numa_free(cache_local, size_);
    else if (*incomplete_cache_ != nullptr) numa_free(*incomplete_cache_, size_);
}

inline void dsacache::CacheData::WaitOnCompletion() {
    // first check if waiting is even necessary, as a valid
    // cache pointer signals that no waiting is to be performed
    if (cache_->load() != nullptr) {
        return;
    }

    // then wait until the handlers become available
    handlers_->wait(nullptr);

    // exchange the global handlers pointer with a sentinel to obtain a local
    // copy - this signals that this thread is the sole owner and therefore
    // responsible for waiting on them. we can not set it to nullptr here but
    // instead use the maximum 64-bit value in order to prevent deadlocks from
    // the waiting construct above
    std::vector<dml_handler>* local_handlers = handlers_->exchange(reinterpret_cast<std::vector<dml_handler>*>(maxptr));

    // ensure that no other thread snatched the handlers before us;
    // in case one did, wait for the cache to become valid and return
    if (local_handlers == nullptr || local_handlers == reinterpret_cast<std::vector<dml_handler>*>(maxptr)) {
        cache_->wait(nullptr);
        return;
    }

    // at this point we are responsible for waiting on the handlers
    // and for handling any error that comes through them gracefully
    bool error = false;

    for (auto& handler : *local_handlers) {
        if (CheckFlag(flags_, FLAG_WAIT_WEAK) && !handler.is_finished()) {
            handlers_->store(local_handlers);
            return;
        }

        auto result = handler.get();

        if (result.status != dml::status_code::ok) {
            // if one of the copy tasks failed we abort the whole task
            // after all operations on it have completed
            error = true;
        }
    }

    // at this point all handlers have been waited for
    // and may therefore be decommissioned
    delete local_handlers;

    // handle errors now by aborting the cache
    if (error) {
        cache_->store(src_);
        numa_free(*incomplete_cache_, size_);
        delete_ = false;
        *incomplete_cache_ = nullptr;
    }
    else {
        cache_->store(*incomplete_cache_);
    }

    // notify all waiting threads so they wake up quickly
    cache_->notify_all();
    handlers_->notify_all();
}

inline void dsacache::CacheData::SetTaskHandlersAndCache(uint8_t* cache, std::vector<dml_handler>* handlers) {
    *incomplete_cache_ = cache;
    handlers_->store(handlers);
    handlers_->notify_one();
}

inline void dsacache::CacheData::Init() {
    cache_->store(nullptr);
    delete_ = true;
}
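
Below is a minimal usage sketch (illustrative only, not part of the header): it wires the cache up with trivial policy functions, requests a cached copy of a buffer, waits for the DSA copy to complete and then reads the cached location. The names SimpleCachePolicy and SimpleCopyPolicy as well as the header name "cache.hpp" are assumptions made for this example.

#include <cstdint>
#include <cstddef>
#include <memory>
#include <vector>

#include "cache.hpp" // assumed file name of the header above

// trivial placement policy: cache directly on the node that needs the data
int SimpleCachePolicy(const int numa_dst_node, const int numa_src_node, const size_t data_size) {
    return numa_dst_node;
}

// trivial copy policy: perform the copy with the engine on the destination node
std::vector<int> SimpleCopyPolicy(const int numa_dst_node, const int numa_src_node, const size_t data_size) {
    return { numa_dst_node };
}

int main() {
    dsacache::Cache cache;
    cache.Init(SimpleCachePolicy, SimpleCopyPolicy);

    std::vector<uint8_t> data(4096, 42);

    // request a cached copy of the buffer and wait until the copy has completed
    std::unique_ptr<dsacache::CacheData> entry = cache.Access(data.data(), data.size());
    entry->WaitOnCompletion();

    // after waiting, the location is either a valid copy, the source itself
    // (on copy failure) or nullptr - the caller must handle all three cases
    const uint8_t* cached = entry->GetDataLocation();

    return cached == nullptr ? 1 : 0;
}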