This repository contains my bachelor's thesis, the associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator

#pragma once

#include <cstddef>
#include <cstdint>
#include <istream>
#include <ostream>
#include <string>
#include <vector>

#include "json/single_include/nlohmann/json.hpp"
#include "dml-helper.hpp"
struct TaskData {
    // thread placement / engine selection
    uint8_t numa_node;
    // region size and source + destination for move
    size_t size;
    uint8_t nnode_src;
    uint8_t nnode_dst;
    // repetition
    uint32_t rep_count;
    uint32_t batch_size;
};
// Serialize a TaskData into the nested JSON layout ("task" / "affinity" groups).
inline void to_json(nlohmann::json& j, const TaskData& a) {
    j["task"]["size"] = a.size;
    j["task"]["batch_size"] = a.batch_size;
    j["task"]["reps"] = a.rep_count;
    j["affinity"]["node"] = a.numa_node;
    j["affinity"]["nnode_src"] = a.nnode_src;
    j["affinity"]["nnode_dst"] = a.nnode_dst;
}

// Deserialize a TaskData; at() throws on a missing key instead of
// invoking undefined behavior like operator[] does on a const json.
inline void from_json(const nlohmann::json& j, TaskData& a) {
    j.at("task").at("size").get_to(a.size);
    j.at("task").at("batch_size").get_to(a.batch_size);
    j.at("task").at("reps").get_to(a.rep_count);
    j.at("affinity").at("node").get_to(a.numa_node);
    j.at("affinity").at("nnode_src").get_to(a.nnode_src);
    j.at("affinity").at("nnode_dst").get_to(a.nnode_dst);
}
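
// For reference, a single TaskData maps onto a nested object like the one
// below (the values are illustrative only, not taken from any measurement):
//
//   {
//     "task":     { "size": 1048576, "batch_size": 1, "reps": 100 },
//     "affinity": { "node": 0, "nnode_src": 0, "nnode_dst": 1 }
//   }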
// Write the completed run (task list, per-task timings, output path)
// as a single JSON document to the given stream.
inline void WriteResultLog(const std::vector<TaskData>& args, const std::string& path,
                           const std::vector<uint64_t>& times, std::ostream& os) {
    nlohmann::json json;
    json["count"] = args.size();
    json["timings"] = times;
    json["path"] = path;
    json["list"] = args;
    os << json;
}
// Read a work description: the number of tasks, an output path, a global
// repetition count, and the list of tasks itself.
inline void ReadWorkDescription(std::vector<TaskData>& args, std::string& path,
                                uint64_t& repetitions, std::istream& is) {
    nlohmann::json json;
    is >> json;
    const uint32_t count = json.at("count");
    args.resize(count);
    path = json.at("path");
    repetitions = json.at("repetitions");
    for (uint32_t i = 0; i < count; i++) {
        args[i] = json.at("list")[i].get<TaskData>();
    }
}
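
For context, a hedged usage sketch of the two helpers above. The header name task-data.hpp, the output file result.json, and all JSON values are assumptions made for illustration; only the function signatures and the JSON keys come from the header itself.

// Hypothetical usage sketch; file names and values are assumptions.
#include <cstdint>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include "task-data.hpp"  // assumed name for the header shown above

int main() {
    // A work description as consumed by ReadWorkDescription: a top-level
    // "count", "path" and "repetitions", plus a "list" of tasks.
    std::istringstream input(R"({
        "count": 1,
        "path": "./results",
        "repetitions": 10,
        "list": [{
            "task":     { "size": 1048576, "batch_size": 1, "reps": 100 },
            "affinity": { "node": 0, "nnode_src": 0, "nnode_dst": 1 }
        }]
    })");

    std::vector<TaskData> tasks;
    std::string path;
    uint64_t repetitions = 0;
    ReadWorkDescription(tasks, path, repetitions, input);

    // ... run the copy tasks and collect one timing per task ...
    std::vector<uint64_t> times(tasks.size(), 0);

    std::ofstream log("result.json");
    WriteResultLog(tasks, path, times, log);
    return 0;
}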