This repository contains my bachelor's thesis and its associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with the Intel Data Streaming Accelerator.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

83 lines
3.2 KiB

#pragma once

#include <numa.h>

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

#include <dml/dml.hpp>
  7. namespace offcache {
  8. // the cache task structure will be used to submit and
  9. // control a cache element, while providing source pointer
  10. // and size in bytes for submission
  11. //
  12. // then the submitting thread may wait on the atomic "result"
  13. // which will be notified by the cache worker upon processing
  14. // after which the atomic-bool-ptr active will also become valid
  15. //
  16. // the data pointed to by result and the bool-ptr are guaranteed
  17. // to remain valid until the value pointed to by active is changed
  18. // to false, after which the worker may clean up and delete the
  19. // structure - carefull, do not call delete on this, the worker does
  20. struct CacheTask {
  21. uint8_t* data_;
  22. size_t size_;
  23. std::atomic<uint8_t*> result_ { nullptr };
  24. std::atomic<bool>* active_;
  25. };
  26. // worker class, one for each numa node
  27. // discovers its node configuration on startup
  28. // and keeps track of available memory
  29. class CacheWorker {
  30. private:
  31. uint8_t numa_node_ = 0;
  32. std::unordered_map<uint8_t*, CacheTask*> cache_info_;
  33. public:
  34. // this is the mailbox of the worker to which a new task
  35. // may be submitted by exchanging nullptr with a valid one
  36. // and notifying on the atomic after which ownership
  37. // of the CacheTask structure is transferred to the worker
  38. std::atomic<CacheTask*>* task_slot_ = nullptr;
  39. static void run(CacheWorker* this_, const uint8_t numa_node);
  40. };
  41. // singleton which holds the cache workers
  42. // and is the place where work will be submited
  43. class CacheCoordinator {
  44. public:
  45. // cache policy is defined as a type here to allow flexible usage of the cacher
  46. // given a numa destination node (where the data will be needed), the numa source
  47. // node (current location of the data) and the data size, this function should
  48. // return optimal cache placement
  49. // dst node and returned value can differ if the system, for example, has HBM
  50. // attached accessible directly to node n under a different node id m
  51. typedef uint8_t (CachePolicy)(const uint8_t numa_dst_node, const uint8_t numa_src_node, const size_t data_size);
  52. // copy policy specifies the copy-executing nodes for a given task
  53. // which allows flexibility in assignment for optimizing raw throughput
  54. // or choosing a conservative usage policy
  55. typedef std::vector<uint8_t> (CopyPolicy)(const uint8_t numa_dst_node, const uint8_t numa_src_node);
  56. enum class ExecutionPolicy {
  57. Immediate, Relaxed, NoCache
  58. };
  59. private:
  60. CachePolicy* cache_policy_function_ = nullptr;
  61. CopyPolicy* copy_policy_function_ = nullptr;
  62. public:
  63. void Init(CachePolicy* cache_policy_function, CopyPolicy* copy_policy_function);
  64. // submits the given task and takes ownership of the pointer
  65. void SubmitTask(CacheTask* task, const ExecutionPolicy policy) const;
  66. static void WaitOnCompletion(CacheTask* task);
  67. static void SignalDataUnused(CacheTask* task);
  68. };
  69. }