This repository contains my bachelor's thesis, the associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator

#pragma once

#include <iostream>
#include <vector>
#include <chrono>
#include <cstring>
#include <memory>
#include <numeric>
#include <future>
#include <thread>
#include <pthread.h>
#include <numa.h>

#include <dml/dml.hpp>

#include "util/dml-helper.hpp"
#include "util/task-data.hpp"

#define LOG_CODE_INFO "Location: " << __FILE__ << "@" << __LINE__ << "::" << __FUNCTION__ << std::endl
#define LOG_ERR { pthread_t t = pthread_self(); std::cerr << "--- BEGIN ERROR MSG ---" << std::endl << "Physical: [Node " << args->numa_node << " | Thread " << t << "]" << std::endl; } std::cerr << LOG_CODE_INFO
#define CHECK_STATUS(status,msg) { if (status != dml::status_code::ok) { LOG_ERR << "Status Code: " << StatusCodeToString(status) << std::endl << msg << std::endl; args->status = status; return nullptr; }}

// shared future on which all worker threads block, so that the copy
// operations of one iteration are released at (nearly) the same instant
std::shared_future<void> LAUNCH_;

template <typename path>
void* thread_function(void* argp) {
    TaskData* args = reinterpret_cast<TaskData*>(argp);

    // pin the current thread to the requested numa node
    numa_run_on_node(args->numa_node);

    // allocate memory for the move operation on the requested numa nodes
    void* src = numa_alloc_onnode(args->size, args->nnode_src);
    void* dst = numa_alloc_onnode(args->size, args->nnode_dst);
    dml::data_view srcv = dml::make_view(reinterpret_cast<uint8_t*>(src), args->size);
    dml::data_view dstv = dml::make_view(reinterpret_cast<uint8_t*>(dst), args->size);

    // touch both buffers so that their pages are actually mapped
    std::memset(src, 0, args->size);
    std::memset(dst, 0, args->size);

    args->status = dml::status_code::ok;

    // wait until the coordinating thread releases all workers at once
    LAUNCH_.wait();

    if (args->batch_size > 1) {
        auto sequence = dml::sequence(args->batch_size, std::allocator<dml::byte_t>());

        for (uint32_t j = 0; j < args->batch_size; j++) {
            // block_on_fault() is required to submit the task in a way that
            // lets the DSA engine handle page faults itself together with the
            // IOMMU, which requires the WQ to be configured to allow this too
            const auto status = sequence.add(dml::mem_copy.block_on_fault(), srcv, dstv);
            CHECK_STATUS(status, "Adding operation to batch failed!");
        }

        // we use the asynchronous submit routine even though it is not
        // required here; the project will later use only asynchronous
        // operation, so this behaviour should be benchmarked as well
        auto handler = dml::submit<path>(dml::batch, sequence);
        auto result = handler.get();
        const dml::status_code status = result.status;
        CHECK_STATUS(status, "Batch completed with an error!");
    }
    else {
        // we use the asynchronous submit routine even though it is not
        // required here; the project will later use only asynchronous
        // operation, so this behaviour should be benchmarked as well
        // block_on_fault() is required to submit the task in a way that
        // lets the DSA engine handle page faults itself together with the
        // IOMMU, which requires the WQ to be configured to allow this too
        auto handler = dml::submit<path>(dml::mem_copy.block_on_fault(), srcv, dstv);
        auto result = handler.get();
        const dml::status_code status = result.status;
        CHECK_STATUS(status, "Operation completed with an error!");
    }

    // free the allocated memory regions on the selected nodes
    numa_free(src, args->size);
    numa_free(dst, args->size);

    return nullptr;
}

template <typename path>
std::vector<uint64_t> execute_dml_memcpy(std::vector<TaskData>& args, const uint64_t iterations) {
    std::vector<uint64_t> timing;

    // initialize the numa library
    numa_available();

    // the first 5 iterations serve as warmup and their timings are discarded
    for (uint64_t i = 0; i < iterations + 5; i++) {
        std::vector<std::thread> threads;

        // for each submitted task we hand out the shared launch future
        // and create the thread, passing the task data as its argument
        std::promise<void> launch_promise;
        LAUNCH_ = launch_promise.get_future();

        for (auto& arg : args) {
            threads.emplace_back(thread_function<path>, &arg);
        }

        // give all threads time to finish their setup and block on LAUNCH_
        using namespace std::chrono_literals;
        std::this_thread::sleep_for(100ms);

        const auto time_start = std::chrono::steady_clock::now();
        launch_promise.set_value();
        for (std::thread& t : threads) { t.join(); }
        const auto time_end = std::chrono::steady_clock::now();

        if (i >= 5) timing.emplace_back(std::chrono::duration_cast<std::chrono::nanoseconds>(time_end - time_start).count());
    }

    return timing;
}
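
For reference, a minimal sketch of how the benchmark might be driven. The TaskData fields set here are exactly the ones the code above accesses; their exact declaration lives in "util/task-data.hpp", so default-constructibility, the header name in the include, and the chosen copy size are assumptions for illustration. dml::hardware is the DML execution path that targets the DSA engine (dml::software and dml::automatic are the alternatives the library offers).

#include <cstdint>
#include <iostream>
#include <vector>
#include <dml/dml.hpp>
#include "dml-memcpy.hpp" // hypothetical name for the header shown above

int main() {
    // one task: copy 1 MiB from node 0 to node 1 as a single (non-batched)
    // operation, submitted by a worker thread pinned to node 0
    TaskData task;
    task.numa_node  = 0;
    task.nnode_src  = 0;
    task.nnode_dst  = 1;
    task.size       = 1024 * 1024;
    task.batch_size = 1;

    std::vector<TaskData> args{ task };

    // run 10 timed iterations (plus the 5 warmup rounds discarded internally)
    auto timings = execute_dml_memcpy<dml::hardware>(args, 10);

    for (const uint64_t ns : timings) std::cout << ns << " ns" << std::endl;
    return 0;
}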