This repository contains my bachelor's thesis and the associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with the Intel Data Streaming Accelerator
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

338 lines
12 KiB

#pragma once

#include <sched.h>
#include <semaphore.h>

#include <numa.h>
#include <numaif.h>

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <shared_mutex>
#include <stdexcept>
#include <thread>
#include <unordered_map>
#include <vector>

#include <dml/dml.hpp>
  11. namespace offcache {
  12. // execution policy selects in which way the data is supposed to be cached
  13. // and returned with the following behaviour is guaranteed in addition to the
  14. // returned value being valid:
  15. // Immediate: return as fast as possible
  16. // may return cached data, can return data in RAM
  17. // will trigger caching of the data provided
  18. // ImmediateNoCache: return as fast as possible and never trigger caching
  19. // same as Immediate but will not trigger caching
  20. // Relaxed: no rapid return needed, take time
  21. // will trigger caching and may only return
  22. // once the caching is successful but can still
  23. // provide data in RAM
// selects the trade-off between return latency and cache population
// (the exact per-policy guarantees are documented above)
enum class ExecutionPolicy {
    Relaxed, Immediate, ImmediateNoCache
};
  27. // the cache task structure will be used to submit and
  28. // control a cache element, while providing source pointer
  29. // and size in bytes for submission
  30. //
  31. // then the submitting thread may wait on the atomic "result"
  32. // which will be notified by the cache worker upon processing
  33. // after which the atomic-bool-ptr active will also become valid
struct CacheTask {
    // source pointer in RAM; also the key under which the task is stored in the cache map
    uint8_t* data_;
    // size of the data in bytes
    size_t size_;
    // location of valid data; Access() sets this to data_ for the
    // Immediate / ImmediateNoCache policies so it is usable right away
    uint8_t* result_ = nullptr;
    // prospective cache location, set by SubmitTask once the numa
    // allocation succeeds; only meaningful after the copy completed
    uint8_t* maybe_result_ = nullptr;
    // cleared via SignalDataUnused(); Flush() reclaims inactive entries
    std::atomic<bool> active_ { true };
    // set (and notified) by SubmitTask once all copy handlers are
    // registered; WaitOnCompletion blocks on this flag
    std::atomic<bool> valid_ { false };
    // one dml copy handler per executing node, awaited in WaitOnCompletion
    std::vector<dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>>> handlers_;
};
  43. // singleton which holds the cache workers
  44. // and is the place where work will be submited
class CacheCoordinator {
public:
    // cache policy is defined as a type here to allow flexible usage of the cacher
    // given a numa destination node (where the data will be needed), the numa source
    // node (current location of the data) and the data size, this function should
    // return the optimal node for cache placement
    // dst node and returned value can differ if the system, for example, has HBM
    // attached accessible directly to node n under a different node id m
    typedef int (CachePolicy)(const int numa_dst_node, const int numa_src_node, const size_t data_size);
    // copy policy specifies the copy-executing nodes for a given task
    // which allows flexibility in assignment for optimizing raw throughput
    // or choosing a conservative usage policy
    typedef std::vector<int> (CopyPolicy)(const int numa_dst_node, const int numa_src_node);
private:
    // guards cache_state_; shared for lookups, exclusive for insert/erase
    std::shared_mutex cache_mutex_;
    // maps the source data pointer to its caching task
    std::unordered_map<uint8_t*, CacheTask*> cache_state_;
    // policy callbacks, set once via Init()
    CachePolicy* cache_policy_function_ = nullptr;
    CopyPolicy* copy_policy_function_ = nullptr;
    // submits a single dml mem-copy on the given node and returns its handler
    dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>> ExecuteCopy(const uint8_t* src, uint8_t* dst, const size_t size, const int node) const;
    // allocates the cache destination, registers the task and launches the copies
    void SubmitTask(CacheTask* task);
    // allocates and initializes a task for the given source data
    CacheTask* CreateTask(const uint8_t *data, const size_t size) const;
    // releases the cache allocation (if any) and the task itself
    void DestroyTask(CacheTask* task) const;
public:
    // stores the policy callbacks and initializes libnuma
    void Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function);
    // function to perform data access through the cache
    // behaviour depends on the chosen execution policy
    // Immediate and ImmediateNoCache return a cache task
    // with guaranteed-valid result value where Relaxed
    // policy does not come with this guarantee.
    CacheTask* Access(uint8_t* data, const size_t size, const ExecutionPolicy policy);
    // waits upon completion of caching
    static void WaitOnCompletion(CacheTask* task);
    // invalidates the given pointer
    // afterwards the reference to the
    // cache task object may be forgotten
    static void SignalDataUnused(CacheTask* task);
    // returns the location of the cached data
    // which may or may not be valid
    static uint8_t* GetDataLocation(CacheTask* task);
    // destroys every cache entry that is no longer marked active
    void Flush();
};
  86. }
  87. inline void offcache::CacheCoordinator::Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function) {
  88. cache_policy_function_ = cache_policy_function;
  89. copy_policy_function_ = copy_policy_function;
  90. // initialize numa library
  91. numa_available();
  92. }
  93. inline offcache::CacheTask* offcache::CacheCoordinator::Access(uint8_t* data, const size_t size, const ExecutionPolicy policy) {
  94. // the best situation is if this data is already cached
  95. // which we check in an unnamed block in which the cache
  96. // is locked for reading to prevent another thread
  97. // from marking the element we may find as unused and
  98. // clearing it
  99. {
  100. std::shared_lock<std::shared_mutex> lock(cache_mutex_);
  101. const auto search = cache_state_.find(data);
  102. if (search != cache_state_.end()) {
  103. if (search->second->size_ == size) {
  104. search->second->active_.store(true);
  105. // TODO: check for completed status depending on execution policy
  106. return search->second;
  107. }
  108. else {
  109. DestroyTask(search->second);
  110. cache_state_.erase(search);
  111. }
  112. }
  113. }
  114. // at this point the requested data is not present in cache
  115. // and we create a caching task for it
  116. CacheTask* task = CreateTask(data, size);
  117. if (policy == ExecutionPolicy::Immediate) {
  118. // in intermediate mode the returned task
  119. // object is guaranteed to be valid and therefore
  120. // its resulting location must be validated
  121. // after which we submit the task
  122. // maybe_result is then set by submit
  123. task->result_ = data;
  124. SubmitTask(task);
  125. return task;
  126. }
  127. else if (policy == ExecutionPolicy::ImmediateNoCache) {
  128. // for immediatenocache we just validate
  129. // the generated task and return it
  130. // we must also set maybe_result in case
  131. // someone waits on this
  132. task->result_ = data;
  133. task->maybe_result_ = data;
  134. return task;
  135. }
  136. else if (policy == ExecutionPolicy::Relaxed) {
  137. // for relaxed no valid task must be returned
  138. // and therefore we just submit and then give
  139. // the possible invalid task back with only
  140. // maybe_result set by submission
  141. SubmitTask(task);
  142. return task;
  143. }
  144. else {
  145. // this should not be reached
  146. }
  147. }
  148. inline void offcache::CacheCoordinator::SubmitTask(CacheTask* task) {
  149. // obtain numa node of current thread to determine where the data is needed
  150. const int current_cpu = sched_getcpu();
  151. const int current_node = numa_node_of_cpu(current_cpu);
  152. // obtain node that the given data pointer is allocated on
  153. int data_node = -1;
  154. get_mempolicy(&data_node, NULL, 0, (void*)task->data_, MPOL_F_NODE | MPOL_F_ADDR);
  155. // querry cache policy function for the destination numa node
  156. const uint32_t dst_node = cache_policy_function_(current_node, data_node, task->size_);
  157. // allocate data on this node and flush the unused parts of the
  158. // cache if the operation fails and retry once
  159. // TODO: smarter flush strategy could keep some stuff cached
  160. uint8_t* dst = numa_alloc_onnode(task->size_, dst_node);
  161. if (dst == nullptr) {
  162. Flush();
  163. dst = numa_alloc_onnode(task->size_, dst_node);
  164. if (dst == nullptr) {
  165. return;
  166. }
  167. }
  168. task->maybe_result_ = dst;
  169. // querry copy policy function for the nodes to use for the copy
  170. const std::vector<int> executing_nodes = copy_policy_function_(dst_node, data_node);
  171. const size_t task_count = executing_nodes.size();
  172. // at this point the task may be added to the cache structure
  173. // due to the task being initialized with the valid flag set to false
  174. {
  175. std::unique_lock<std::shared_mutex> lock(cache_mutex_);
  176. const auto state = cache_state_.insert({task->data_, task});
  177. // if state.second is false then no insertion took place
  178. // which means that concurrently whith this thread
  179. // some other thread must have accessed the same
  180. // resource in which case we must perform an abort
  181. // TODO: abort is not the only way to handle this situation
  182. if (!state.second) {
  183. // abort by doing the following steps
  184. // (1) free the allocated memory, (2) remove the "maybe result" as
  185. // we will not run the caching operation, (3) clear the sub tasks
  186. // for the very same reason, (4) set the result to the RAM-location
  187. numa_free(dst, task->size_);
  188. task->maybe_result_ = nullptr;
  189. task->result_ = task->data_;
  190. return;
  191. }
  192. }
  193. // each task will copy one fair part of the total size
  194. // and in case the total size is not a factor of the
  195. // given task count the last node must copy the remainder
  196. const size_t size = task->size_ / task_count;
  197. const size_t last_size = size + task->size_ % task_count;
  198. // save the current numa node mask to restore later
  199. // as executing the copy task will place this thread
  200. // on a different node
  201. const int nodemask = numa_get_run_node_mask();
  202. for (uint32_t i = 0; i < task_count; i++) {
  203. const size_t local_size = i + 1 == task_count ? size : last_size;
  204. const size_t local_offset = i * size;
  205. const uint8_t* local_src = task->data_ + local_offset;
  206. uint8_t* local_dst = dst + local_offset;
  207. const auto handler = ExecuteCopy(local_src, local_dst, local_size, executing_nodes[i]);
  208. task->handlers_.emplace_back(handler);
  209. }
  210. // set the valid flag of the task as all handlers
  211. // required for completion signal are registered
  212. task->valid_.store(true);
  213. task->valid_.notify_all();
  214. // restore the previous nodemask
  215. numa_run_on_node_mask(nodemask);
  216. }
  217. inline dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>> offcache::CacheCoordinator::ExecuteCopy(const uint8_t* src, uint8_t* dst, const size_t size, const int node) {
  218. dml::data_view srcv = dml::make_view(reinterpret_cast<uint8_t*>(src), size);
  219. dml::data_view dstv = dml::make_view(reinterpret_cast<uint8_t*>(dst), size);
  220. numa_run_on_node(node);
  221. return dml::submit<path>(dml::mem_copy.block_on_fault(), srcv, dstv);
  222. }
  223. inline offcache::CacheTask* offcache::CacheCoordinator::CreateTask(const uint8_t* data, const size_t size) const {
  224. CacheTask* task = new CacheTask();
  225. task->data_ = data;
  226. task->size_ = size;
  227. return task;
  228. }
  229. inline void offcache::CacheCoordinator::DestroyTask(CacheTask* task) const {
  230. numa_free(task->result_, task->size_);
  231. delete task;
  232. }
  233. inline void offcache::CacheCoordinator::WaitOnCompletion(CacheTask* task) {
  234. task->valid_.wait(false);
  235. for (auto& handler : task->handlers_) {
  236. auto result = handler.get();
  237. // TODO: handle the returned status code
  238. }
  239. task->handlers_.clear();
  240. }
  241. inline uint8_t* offcache::CacheCoordinator::GetDataLocation(CacheTask* task) {
  242. return task->result_;
  243. }
  244. inline void offcache::CacheCoordinator::SignalDataUnused(CacheTask* task) {
  245. task->active_.store(false);
  246. }
  247. inline void offcache::CacheCoordinator::Flush() {
  248. // TODO: there probably is a better way to implement this flush
  249. {
  250. std::unique_lock<std::shared_mutex> lock(cache_mutex_);
  251. auto it = cache_state_.begin();
  252. while (it != cache_state_.end()) {
  253. if (it->second->active_.load() == false) {
  254. DestroyTask(it->second);
  255. cache_state_.erase(it);
  256. it = cache_state_.begin();
  257. }
  258. else {
  259. it++;
  260. }
  261. }
  262. }
  263. }