This repository contains my bachelor's thesis and the associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

418 lines
14 KiB

  1. #pragma once
  2. #include <iostream>
  3. #include <atomic>
  4. #include <vector>
  5. #include <thread>
  6. #include <unordered_map>
  7. #include <shared_mutex>
  8. #include <mutex>
  9. #include <memory>
  10. #include <semaphore.h>
  11. #include <sched.h>
  12. #include <numa.h>
  13. #include <numaif.h>
  14. #include <dml/dml.hpp>
namespace offcache {
// The execution policy selects in which way the data is supposed to be
// cached and returned. In addition to the returned value being valid,
// the following behaviour is guaranteed:
// Immediate: return as fast as possible;
//   may return cached data, can return data in RAM,
//   will trigger caching of the data provided
// ImmediateNoCache: return as fast as possible and never trigger caching;
//   otherwise the same as Immediate
// Relaxed: no rapid return needed; will trigger caching and may only
//   return once the caching is successful, but can still provide data
//   located in RAM
enum class ExecutionPolicy {
    Relaxed, Immediate, ImmediateNoCache
};

class Cache;

// The cache task structure is used to submit and control a cache
// element, providing the source pointer and size in bytes for
// submission.
//
// The submitting thread may then block in WaitOnCompletion(), which
// waits on the shared atomic cache pointer until the instance owning
// the copy handlers publishes the final data location and notifies.
class CacheData {
public:
    using dml_handler = dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>>;
private:
    // source location of the data in RAM
    uint8_t* src_;
    // size of the data in bytes
    size_t size_;
    // reference counter shared by all copies of this CacheData;
    // heap-allocated so it outlives any single copy (note: an
    // atomic<int32_t> counter, not a bool, despite older comments)
    std::atomic<int32_t>* active_;
protected:
    // shared pointer to the current data location; holds nullptr until
    // the caching operation finished or a RAM fallback was stored
    std::atomic<uint8_t*>* cache_;
    // the in-progress cache allocation before it is published to cache_
    uint8_t* incomplete_cache_;
    // handlers of the in-flight dml copy operations; only the instance
    // that submitted the task owns these, copies hold nullptr
    std::unique_ptr<std::vector<dml_handler>> handlers_;
    friend Cache;
public:
    CacheData(uint8_t* data, const size_t size);
    CacheData(const CacheData& other);
    ~CacheData();
    void Deallocate();
    void WaitOnCompletion();
    uint8_t* GetDataLocation() const;
    bool Active() const;
};

// Singleton which holds the cache workers and is the place where work
// will be submitted.
class Cache {
public:
    // The cache policy is defined as a type here to allow flexible
    // usage of the cacher. Given a numa destination node (where the
    // data will be needed), the numa source node (current location of
    // the data) and the data size, this function should return the
    // optimal cache placement. The dst node and the returned value can
    // differ if the system, for example, has HBM attached accessible
    // directly to node n under a different node id m.
    typedef int (CachePolicy)(const int numa_dst_node, const int numa_src_node, const size_t data_size);
    // The copy policy specifies the copy-executing nodes for a given
    // task, which allows flexibility in assignment for optimizing raw
    // throughput or choosing a conservative usage policy.
    typedef std::vector<int> (CopyPolicy)(const int numa_dst_node, const int numa_src_node);
private:
    // guards cache_state_: shared lock for lookup, exclusive for insert/erase
    std::shared_mutex cache_mutex_;
    // maps the RAM source pointer to its cache control structure
    std::unordered_map<uint8_t*, CacheData> cache_state_;
    CachePolicy* cache_policy_function_ = nullptr;
    CopyPolicy* copy_policy_function_ = nullptr;
    // submits one dml memory copy from the given node, returns its handler
    dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>> ExecuteCopy(const uint8_t* src, uint8_t* dst, const size_t size, const int node) const;
    // performs placement decision, allocation and copy submission for a task
    void SubmitTask(CacheData* task);
public:
    // installs the policy callbacks; must be called before any Access
    void Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function);
    // Function to perform data access through the cache; behaviour
    // depends on the chosen execution policy. Immediate and
    // ImmediateNoCache return a cache task with guaranteed-valid
    // result value where the Relaxed policy does not come with this
    // guarantee.
    std::unique_ptr<CacheData> Access(uint8_t* data, const size_t size, const ExecutionPolicy policy);
    // removes all currently inactive entries from the cache
    void Flush();
};
}
  93. inline void offcache::Cache::Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function) {
  94. cache_policy_function_ = cache_policy_function;
  95. copy_policy_function_ = copy_policy_function;
  96. // initialize numa library
  97. numa_available();
  98. std::cout << "[-] Cache Initialized" << std::endl;
  99. }
  100. inline std::unique_ptr<offcache::CacheData> offcache::Cache::Access(uint8_t* data, const size_t size, const ExecutionPolicy policy) {
  101. // the best situation is if this data is already cached
  102. // which we check in an unnamed block in which the cache
  103. // is locked for reading to prevent another thread
  104. // from marking the element we may find as unused and
  105. // clearing it
  106. {
  107. std::shared_lock<std::shared_mutex> lock(cache_mutex_);
  108. const auto search = cache_state_.find(data);
  109. if (search != cache_state_.end()) {
  110. if (search->second.size_ == size) {
  111. search->second.active_->store(true);
  112. std::cout << "[+] Found Cached version for 0x" << std::hex << (uint64_t)data << std::dec << std::endl;
  113. return std::move(std::make_unique<CacheData>(search->second));
  114. }
  115. else {
  116. std::cout << "[!] Found Cached version with size missmatch for 0x" << std::hex << (uint64_t)data << std::dec << std::endl;
  117. cache_state_.erase(search);
  118. }
  119. }
  120. }
  121. // at this point the requested data is not present in cache
  122. // and we create a caching task for it
  123. auto task = std::make_unique<CacheData>(data, size);
  124. if (policy == ExecutionPolicy::Immediate) {
  125. // in intermediate mode the returned task
  126. // object is guaranteed to be valid and therefore
  127. // its resulting location must be validated
  128. // after which we submit the task
  129. // maybe_result is then set by submit
  130. task->cache_->store(data);
  131. SubmitTask(task.get());
  132. return std::move(task);
  133. }
  134. else if (policy == ExecutionPolicy::ImmediateNoCache) {
  135. // for immediatenocache we just validate
  136. // the generated task and return it
  137. // we must also set maybe_result in case
  138. // someone waits on this
  139. task->cache_->store(data);
  140. task->incomplete_cache_ = data;
  141. return std::move(task);
  142. }
  143. else if (policy == ExecutionPolicy::Relaxed) {
  144. // for relaxed no valid task must be returned
  145. // and therefore we just submit and then give
  146. // the possible invalid task back with only
  147. // maybe_result set by submission
  148. SubmitTask(task.get());
  149. return std::move(task);
  150. }
  151. else {
  152. // this should not be reached
  153. }
  154. }
  155. inline void offcache::Cache::SubmitTask(CacheData* task) {
  156. // obtain numa node of current thread to determine where the data is needed
  157. const int current_cpu = sched_getcpu();
  158. const int current_node = numa_node_of_cpu(current_cpu);
  159. // obtain node that the given data pointer is allocated on
  160. int data_node = -1;
  161. get_mempolicy(&data_node, NULL, 0, (void*)task->src_, MPOL_F_NODE | MPOL_F_ADDR);
  162. // querry cache policy function for the destination numa node
  163. const int dst_node = cache_policy_function_(current_node, data_node, task->size_);
  164. std::cout << "[+] Allocating " << task->size_ << "B on node " << dst_node << " for " << std::hex << (uint64_t)task->src_ << std::dec << std::endl;
  165. // allocate data on this node and flush the unused parts of the
  166. // cache if the operation fails and retry once
  167. // TODO: smarter flush strategy could keep some stuff cached
  168. uint8_t* dst = reinterpret_cast<uint8_t*>(numa_alloc_onnode(task->size_, dst_node));
  169. if (dst == nullptr) {
  170. std::cout << "[!] First allocation try failed for " << task->size_ << "B on node " << dst_node << std::endl;
  171. Flush();
  172. dst = reinterpret_cast<uint8_t*>(numa_alloc_onnode(task->size_, dst_node));
  173. if (dst == nullptr) {
  174. std::cout << "[x] Second allocation try failed for " << task->size_ << "B on node " << dst_node << std::endl;
  175. return;
  176. }
  177. }
  178. task->incomplete_cache_ = dst;
  179. // querry copy policy function for the nodes to use for the copy
  180. const std::vector<int> executing_nodes = copy_policy_function_(dst_node, data_node);
  181. const size_t task_count = executing_nodes.size();
  182. // each task will copy one fair part of the total size
  183. // and in case the total size is not a factor of the
  184. // given task count the last node must copy the remainder
  185. const size_t size = task->size_ / task_count;
  186. const size_t last_size = size + task->size_ % task_count;
  187. // save the current numa node mask to restore later
  188. // as executing the copy task will place this thread
  189. // on a different node
  190. bitmask* nodemask = numa_get_run_node_mask();
  191. for (uint32_t i = 0; i < task_count; i++) {
  192. const size_t local_size = i + 1 == task_count ? size : last_size;
  193. const size_t local_offset = i * size;
  194. const uint8_t* local_src = task->src_ + local_offset;
  195. uint8_t* local_dst = dst + local_offset;
  196. task->handlers_->emplace_back(ExecuteCopy(local_src, local_dst, local_size, executing_nodes[i]));
  197. }
  198. // only at this point may the task be added to the control structure
  199. // because adding it earlier could cause it to be returned for an
  200. // access request while the handler-vector is not fully populated
  201. // which could cause the wait-function to return prematurely
  202. // TODO: this can be optimized because the abort is quite expensive
  203. {
  204. std::unique_lock<std::shared_mutex> lock(cache_mutex_);
  205. const auto state = cache_state_.insert({task->src_, *task});
  206. // if state.second is false then no insertion took place
  207. // which means that concurrently whith this thread
  208. // some other thread must have accessed the same
  209. // resource in which case we must perform an abort
  210. // TODO: abort is not the only way to handle this situation
  211. if (!state.second) {
  212. std::cout << "[x] Found another cache instance for 0x" << std::hex << (uint64_t)task->src_ << std::dec << std::endl;
  213. // first wait on all copy operations to be completed
  214. task->WaitOnCompletion();
  215. // abort by doing the following steps
  216. // (1) free the allocated memory, (2) remove the "maybe result" as
  217. // we will not run the caching operation, (3) clear the sub tasks
  218. // for the very same reason, (4) set the result to the RAM-location
  219. numa_free(dst, task->size_);
  220. task->incomplete_cache_ = nullptr;
  221. task->cache_->store(task->src_);
  222. std::cout << "[-] Abort completed for 0x" << std::hex << (uint64_t)task->src_ << std::dec << std::endl;
  223. return;
  224. }
  225. }
  226. // restore the previous nodemask
  227. numa_run_on_node_mask(nodemask);
  228. }
  229. inline dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>> offcache::Cache::ExecuteCopy(const uint8_t* src, uint8_t* dst, const size_t size, const int node) const {
  230. dml::const_data_view srcv = dml::make_view(src, size);
  231. dml::data_view dstv = dml::make_view(dst, size);
  232. numa_run_on_node(node);
  233. return dml::submit<dml::automatic>(dml::mem_copy.block_on_fault(), srcv, dstv);
  234. }
// Blocks until the cached copy of the data is available (or a RAM
// fallback has been published). Copies of a CacheData carry no handlers
// and wait on the shared atomic cache pointer; the handler-owning
// instance drains its dml handlers, publishes the final location and
// notifies all waiters.
inline void offcache::CacheData::WaitOnCompletion() {
    if (handlers_ == nullptr) {
        // this instance does not own the copy operations - wait until
        // the owning instance stores a non-null pointer into cache_
        std::cout << "[-] Waiting on cache-var-update for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;
        // atomic wait: blocks while the value still equals nullptr
        cache_->wait(nullptr);
        std::cout << "[+] Finished waiting on cache-var-update for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;
    }
    else {
        std::cout << "[-] Waiting on handlers for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;
        // drain every sub-copy; handler.get() blocks until the
        // respective dml operation has finished
        for (auto& handler : *handlers_) {
            auto result = handler.get();
            // TODO: handle the returned status code
        }
        // drop the handlers so subsequent waits take the fast path above
        handlers_ = nullptr;
        std::cout << "[+] Finished waiting on handlers for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;
        // publish the completed cache location, then wake all copies
        // blocked in cache_->wait(nullptr)
        cache_->store(incomplete_cache_);
        cache_->notify_all();
    }
}
  253. offcache::CacheData::CacheData(uint8_t* data, const size_t size) {
  254. std::cout << "[-] New CacheData 0x" << std::hex << (uint64_t)data << std::dec << std::endl;
  255. src_ = data;
  256. size_ = size;
  257. active_ = new std::atomic<int32_t>();
  258. cache_ = new std::atomic<uint8_t*>();
  259. incomplete_cache_ = nullptr;
  260. handlers_ = std::make_unique<std::vector<dml_handler>>();
  261. }
  262. offcache::CacheData::CacheData(const offcache::CacheData& other) {
  263. std::cout << "[-] Copy Created for CacheData 0x" << std::hex << (uint64_t)other.src_ << std::dec << std::endl;
  264. src_ = other.src_;
  265. size_ = other.size_;
  266. cache_ = other.cache_;
  267. active_ = other.active_;
  268. incomplete_cache_ = nullptr;
  269. handlers_ = nullptr;
  270. active_->fetch_add(1);
  271. }
// Destructor: decrements the shared reference counter and tears down
// the shared state when this was the last remaining reference.
offcache::CacheData::~CacheData() {
    std::cout << "[-] Destructor for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;
    // fetch_sub returns the value BEFORE the decrement; the original
    // instance starts the counter at 0 and each copy adds 1, so a
    // previous value <= 0 means no other reference remains
    // NOTE(review): this convention (original not counting itself)
    // works only as long as copies never outlive both the original and
    // each other concurrently with new copies being made - verify
    const int32_t v = active_->fetch_sub(1);
    // if the returned value is non-positive then we must execute
    // proper deletion as this was the last reference
    if (v <= 0) {
        std::cout << "[!] Full Destructor for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;
        // release the cached buffer, then the shared control atomics
        Deallocate();
        delete active_;
        delete cache_;
    }
}
  285. void offcache::CacheData::Deallocate() {
  286. std::cout << "[!] Deallocating for CacheData 0x" << std::hex << (uint64_t)src_ << std::dec << std::endl;
  287. numa_free(cache_, size_);
  288. cache_ = nullptr;
  289. incomplete_cache_ = nullptr;
  290. }
  291. uint8_t *offcache::CacheData::GetDataLocation() const {
  292. return cache_->load();
  293. }
  294. bool offcache::CacheData::Active() const {
  295. return active_->load() > 0;
  296. }
  297. inline void offcache::Cache::Flush() {
  298. std::cout << "[-] Flushing Cache" << std::endl;
  299. // TODO: there is a better way to implement this flush
  300. {
  301. std::unique_lock<std::shared_mutex> lock(cache_mutex_);
  302. auto it = cache_state_.begin();
  303. while (it != cache_state_.end()) {
  304. if (it->second.Active() == false) {
  305. cache_state_.erase(it);
  306. it = cache_state_.begin();
  307. }
  308. else {
  309. it++;
  310. }
  311. }
  312. }
  313. }