This contains my bachelor's thesis and associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with the Intel Data Streaming Accelerator
You can not select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

147 lines
5.2 KiB

  1. #pragma once
  2. #include <atomic>
  3. #include <vector>
  4. #include <thread>
  5. #include <unordered_map>
  6. #include <semaphore.h>
  7. #include <numa.h>
  8. #include <dml/dml.hpp>
  9. namespace offcache {
  10. // execution policy selects in which way the data is supposed to be cached
  11. // and returned with the following behaviour is guaranteed in addition to the
  12. // returned value being valid:
  13. // Immediate: return as fast as possible
  14. // may return cached data, can return data in RAM
  15. // will trigger caching of the data provided
  16. // ImmediateNoCache: return as fast as possible and never trigger caching
  17. // same as Immediate but will not trigger caching
  18. // Relaxed: no rapid return needed, take time
  19. // will trigger caching and may only return
  20. // once the caching is successful but can still
  21. // provide data in RAM
  22. enum class ExecutionPolicy {
  23. Relaxed, Immediate, ImmediateNoCache
  24. };
  25. struct WorkerTask {
  26. uint8_t* src_;
  27. uint8_t* dst_;
  28. size_t size_;
  29. std::atomic<bool> completed_ { false };
  30. };
  31. // the cache task structure will be used to submit and
  32. // control a cache element, while providing source pointer
  33. // and size in bytes for submission
  34. //
  35. // then the submitting thread may wait on the atomic "result"
  36. // which will be notified by the cache worker upon processing
  37. // after which the atomic-bool-ptr active will also become valid
  38. struct CacheTask {
  39. uint8_t* data_;
  40. size_t size_;
  41. ExecutionPolicy policy_;
  42. uint8_t* result_;
  43. std::atomic<bool> active_;
  44. std::vector<WorkerTask> sub_tasks_;
  45. };
  46. // worker class, one for each numa node
  47. // discovers its node configuration on startup
  48. // and keeps track of available memory
  49. class CacheWorker {
  50. public:
  51. uint8_t numa_node_ = 0;
  52. // this is the mailbox of the worker to which a new task
  53. // may be submitted by exchanging nullptr with a valid one
  54. // and notifying on the atomic after which ownership
  55. // of the CacheTask structure is transferred to the worker
  56. std::atomic<WorkerTask*>* task_slot_ = nullptr;
  57. static void run(CacheWorker* this_);
  58. };
  59. // singleton which holds the cache workers
  60. // and is the place where work will be submited
  61. class CacheCoordinator {
  62. public:
  63. // cache policy is defined as a type here to allow flexible usage of the cacher
  64. // given a numa destination node (where the data will be needed), the numa source
  65. // node (current location of the data) and the data size, this function should
  66. // return optimal cache placement
  67. // dst node and returned value can differ if the system, for example, has HBM
  68. // attached accessible directly to node n under a different node id m
  69. typedef uint8_t (CachePolicy)(const uint8_t numa_dst_node, const uint8_t numa_src_node, const size_t data_size);
  70. // copy policy specifies the copy-executing nodes for a given task
  71. // which allows flexibility in assignment for optimizing raw throughput
  72. // or choosing a conservative usage policy
  73. typedef std::vector<uint8_t> (CopyPolicy)(const uint8_t numa_dst_node, const uint8_t numa_src_node);
  74. private:
  75. std::unordered_map<uint8_t, CacheWorker> workers_;
  76. std::unordered_map<uint8_t*, CacheTask*> cache_state_;
  77. CachePolicy* cache_policy_function_ = nullptr;
  78. CopyPolicy* copy_policy_function_ = nullptr;
  79. public:
  80. void Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function);
  81. // submits the given task and takes ownership of the pointer
  82. void SubmitTask(CacheTask* task, const ExecutionPolicy policy) const;
  83. // waits upon completion of caching
  84. // returns the location of the data
  85. static uint8_t* WaitOnCompletion(CacheTask* task);
  86. // invalidates the given pointer
  87. static void SignalDataUnused(CacheTask* task);
  88. };
  89. }
// Worker thread entry point spawned once per NUMA node by the
// coordinator's Init(); receives the worker whose mailbox it serves.
// Currently an empty stub — the mailbox polling / copy-execution loop
// is not yet implemented.
void offcache::CacheWorker::run(CacheWorker* this_) {
}
  92. void offcache::CacheCoordinator::Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function) {
  93. cache_policy_function_ = cache_policy_function;
  94. copy_policy_function_ = copy_policy_function;
  95. // initialize numa library
  96. numa_available();
  97. const uint8_t nodes_max = numa_num_configured_nodes();
  98. const uint8_t valid_nodes = numa_get_mems_allowed();
  99. for (uint8_t node = 0; node < nodes_max; node++) {
  100. if (numa_bitmask_isbitset(valid_nodes, node)) {
  101. workers_.insert({ node, CacheWorker() });
  102. workers_[node].numa_node_ = node;
  103. std::thread t (CacheWorker::run, &workers_[node]);
  104. t.detach();
  105. }
  106. }
  107. }
// Submits the given task and takes ownership of the pointer.
// Currently an empty stub — presumably this should consult the copy and
// cache policy callbacks, split the request into WorkerTasks and place
// them into the workers' task slots; TODO confirm intended design.
// NOTE(review): the task and policy parameters are accepted but unused.
void offcache::CacheCoordinator::SubmitTask(CacheTask* task, const ExecutionPolicy policy) const {
}
  110. uint8_t* offcache::CacheCoordinator::WaitOnCompletion(CacheTask* task) {
  111. while (!task->sub_tasks_.empty()) {
  112. task->sub_tasks_.back().completed_.wait(false);
  113. task->sub_tasks_.pop_back();
  114. }
  115. }
  116. void offcache::CacheCoordinator::SignalDataUnused(CacheTask* task) {
  117. task->active_.store(false);
  118. }