remove buggy option for multiple sizes

Branch: master
Constantin Fürst, 1 year ago
commit 8c5a061343

3 changed files:
  1. benchmarks/benchmark.hpp (3 lines changed)
  2. benchmarks/task-data.hpp (47 lines changed)
  3. benchmarks/task-description.json (5 lines changed)

benchmarks/benchmark.hpp (3 lines changed)

@@ -143,7 +143,8 @@ void* thread_function(void* argp) {
             ADD_TIMING_MESSUREMENT;
         }
-        args->rep_completed++;
+        // again: we do not count the first 5 repetitions
+        if (i >= 5) args->rep_completed++;
     }
     // free the allocated memory regions on the selected nodes
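For context, a minimal sketch of the repetition loop this hunk modifies (the loop bound and the submission step are placeholders; only ADD_TIMING_MESSUREMENT, the warm-up comment and the rep_completed update come from the diff):

    for (uint32_t i = 0; i < args->rep_count; i++) {
        // submit the copy operation and wait for its completion (placeholder)
        ADD_TIMING_MESSUREMENT;
        // again: we do not count the first 5 repetitions
        if (i >= 5) args->rep_completed++;
    }

The warm-up repetitions still execute, but they no longer advance the completed-repetition counter.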

benchmarks/task-data.hpp (47 lines changed)

@@ -33,21 +33,6 @@ struct TaskData {
     barrier* barrier_;
 };
-
-struct ReadTaskData {
-    // thread placement / engine selection
-    uint8_t numa_node;
-    // region size and source+destination for move
-    std::vector<size_t> sizes;
-    uint8_t nnode_src;
-    uint8_t nnode_dst;
-    // repetition
-    uint32_t rep_count;
-    bool batch_submit;
-    uint32_t batch_size;
-
-    void AddToTaskVector(std::vector<TaskData>& v) const;
-};
 
 inline void to_json(nlohmann::json& j, const TaskData& a) {
     j["task"]["size"] = a.size;
     j["task"]["iterations"] = a.rep_count;
@@ -67,13 +52,8 @@ inline void to_json(nlohmann::json& j, const TaskData& a) {
     j["report"]["status"] = StatusCodeToString(a.status);
 }
 
-inline void from_json(const nlohmann::json& j, ReadTaskData& a) {
-    const uint32_t size_count = j["task"]["size_count"].template get<uint32_t>();
-    for (uint32_t i = 0; i < size_count; i++) {
-        a.sizes.emplace_back(j["task"]["size"][i].template get<uint32_t>());
-    }
-    j["task"]["iterations"].get_to(a.rep_count);
+inline void from_json(const nlohmann::json& j, TaskData& a) {
+    j["task"]["size"].get_to(a.size);
+    j["task"]["iterations"].get_to(a.rep_count);
     j["task"]["batching"]["batch_submit"].get_to(a.batch_submit);
     j["task"]["batching"]["batch_size"].get_to(a.batch_size);
     j["affinity"]["node"].get_to(a.numa_node);
@@ -81,20 +61,6 @@ inline void from_json(const nlohmann::json& j, ReadTaskData& a) {
     j["affinity"]["nnode_dst"].get_to(a.nnode_dst);
 }
 
-inline void ReadTaskData::AddToTaskVector(std::vector<TaskData>& v) const {
-    for (const auto s : sizes) {
-        TaskData t;
-        t.size = s;
-        t.rep_count = rep_count;
-        t.batch_submit = batch_submit;
-        t.batch_size = batch_size;
-        t.numa_node = numa_node;
-        t.nnode_dst = nnode_dst;
-        t.nnode_src = nnode_src;
-        v.emplace_back(t);
-    }
-}
-
 inline void WriteResultLog(const std::vector<TaskData>& args, const std::string& path, std::ostream& os) {
     nlohmann::json json;
@@ -110,15 +76,10 @@ inline void ReadWorkDescription(std::vector<TaskData>& args, std::string& path,
     is >> json;
 
     const uint32_t count = json.at("count");
-    std::vector<ReadTaskData> rtd;
-    rtd.resize(count);
+    args.resize(count);
     path = json.at("path");
 
     for (uint32_t i = 0; i < count; i++) {
-        rtd[i] = json["list"][i].template get<ReadTaskData>();
-    }
-
-    for (const auto& e : rtd) {
-        e.AddToTaskVector(args);
+        args[i] = json["list"][i].template get<TaskData>();
     }
 }
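For reference, a self-contained sketch of the nlohmann::json ADL mechanism this header relies on: a pair of free to_json/from_json overloads next to the type is enough for json construction and get<T>() to convert in both directions. The Demo struct and its values are illustrative, not part of the benchmark code:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <nlohmann/json.hpp>

    struct Demo {
        std::size_t size;
        uint32_t rep_count;
    };

    // serializer hooks found via argument-dependent lookup,
    // mirroring the shape used for TaskData above
    inline void to_json(nlohmann::json& j, const Demo& d) {
        j["task"]["size"] = d.size;
        j["task"]["iterations"] = d.rep_count;
    }

    inline void from_json(const nlohmann::json& j, Demo& d) {
        j["task"]["size"].get_to(d.size);
        j["task"]["iterations"].get_to(d.rep_count);
    }

    int main() {
        const auto j = nlohmann::json::parse(R"({"task":{"size":4096,"iterations":1000}})");
        Demo d = j.get<Demo>();        // dispatches to from_json
        nlohmann::json out = d;        // dispatches to to_json
        std::cout << out.dump() << '\n';
        return 0;
    }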

benchmarks/task-description.json (5 lines changed)

@@ -1,11 +1,10 @@
 {
     "count": 1,
-    "path" : "hw",
+    "path" : "sw",
     "list": [
         {
             "task": {
-                "size_count": 5,
-                "size": [ 1024, 4096, 1048576, 134217728, 1073741824 ],
+                "size": 4096,
                 "iterations": 1000,
                 "batching": {
                     "batch_submit": false,
