This repository contains my bachelor's thesis, the associated TeX sources, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator

#pragma once

#include <cstdint>
#include <iomanip>
#include <iostream>
#include <string>
#include <vector>

#include <semaphore.h>

#include <dml/dml.hpp> // Intel DML high-level API, provides dml::status_code

#include "json/single_include/nlohmann/json.hpp"
#include "statuscode-tostring.hpp"

// Describes one benchmark task: where the worker thread runs, which NUMA
// nodes the copy moves data between, and the timing results it produces.
struct TaskData {
    // thread placement / engine selection
    uint8_t numa_node;
    // region size and source + destination nodes for the move
    size_t size;
    uint8_t nnode_src;
    uint8_t nnode_dst;
    // repetition
    uint32_t rep_count;
    bool batch_submit;
    uint32_t batch_size;
    uint32_t barrier_after_n_operations;
    // thread output
    dml::status_code status;
    // average run durations in microseconds
    double combined_duration;
    double submit_duration;
    double complete_duration;
    // completed iterations
    uint32_t rep_completed;
    // set by execution
    sem_t* sig;
};

inline void to_json(nlohmann::json& j, const TaskData& a) {
    j["task"]["size"] = a.size;
    j["task"]["iterations"]["desired"] = a.rep_count;
    j["task"]["iterations"]["actual"] = a.rep_completed;
    j["task"]["batching"]["enabled"] = a.batch_submit;
    j["task"]["batching"]["batch_size"] = a.batch_size;
    j["task"]["batching"]["barrier_after_n_operations"] = a.barrier_after_n_operations;
    j["affinity"]["node"] = a.numa_node;
    j["affinity"]["nnode_src"] = a.nnode_src;
    j["affinity"]["nnode_dst"] = a.nnode_dst;
    j["time"]["unit"] = "microseconds";
    j["time"]["summation"] = "average";
    j["time"]["completion"] = a.complete_duration;
    j["time"]["submission"] = a.submit_duration;
    j["time"]["combined"] = a.combined_duration;
    j["report"]["status"] = StatusCodeToString(a.status);
}

// Only the input parameters are read back; timing and status fields stay untouched.
inline void from_json(const nlohmann::json& j, TaskData& a) {
    j["task"]["size"].get_to(a.size);
    j["task"]["iterations"]["desired"].get_to(a.rep_count);
    j["task"]["batching"]["enabled"].get_to(a.batch_submit);
    j["task"]["batching"]["batch_size"].get_to(a.batch_size);
    j["task"]["batching"]["barrier_after_n_operations"].get_to(a.barrier_after_n_operations);
    j["affinity"]["node"].get_to(a.numa_node);
    j["affinity"]["nnode_src"].get_to(a.nnode_src);
    j["affinity"]["nnode_dst"].get_to(a.nnode_dst);
}

// Serializes all task descriptions (plus a result path) as pretty-printed JSON.
inline void WriteResultLog(const std::vector<TaskData>& args, const std::string& path, std::ostream& os) {
    nlohmann::json json;
    json["count"] = args.size();
    json["path"] = path;
    json["list"] = args;
    os << std::setw(4) << json;
}

// Parses a work description previously produced in the format above.
inline void ReadWorkDescription(std::vector<TaskData>& args, std::string& path, std::istream& is) {
    nlohmann::json json;
    is >> json;
    const uint32_t count = json.at("count");
    args.resize(count);
    path = json.at("path");
    for (uint32_t i = 0; i < count; i++) {
        args[i] = json["list"][i].get<TaskData>();
    }
}