Browse Source

remove the incremental (fractional-delta) average calculation in favour of std::accumulate; remove the option to pin thread affinity to a specific core while keeping support for NUMA node affinity assignment

master
Constantin Fürst 1 year ago
parent
commit
80d1b5f543
  1. 30
      benchmarks/benchmark.hpp
  2. 7
      benchmarks/task-data.hpp
  3. 3
      benchmarks/task-description.json

30
benchmarks/benchmark.hpp

@@ -3,6 +3,7 @@
#include <iostream>
#include <vector>
#include <chrono>
#include <numeric>
#include <pthread.h>
#include <semaphore.h>
@@ -13,39 +14,24 @@
#include "statuscode-tostring.hpp"
#include "task-data.hpp"
double avg(const std::vector<double>& v) {
int n = 0;
double mean = 0.0;
for (const auto x : v) {
const double delta = static_cast<double>(x) - mean;
mean += delta / ++n;
}
return mean;
// Arithmetic mean of unsigned 64-bit samples (duration measurements).
// The accumulator's type is taken from accumulate's init argument, so it
// must be uint64_t{0} — a plain 0 would sum into int and overflow once the
// total exceeds INT_MAX. The division is done in long double for precision
// before narrowing explicitly to the double return type.
// Returns 0.0 for an empty vector instead of dividing by zero (NaN).
double avg(const std::vector<uint64_t>& v) {
    if (v.empty()) return 0.0;
    const uint64_t sum = std::accumulate(v.begin(), v.end(), uint64_t{0});
    return static_cast<double>(static_cast<long double>(sum) / static_cast<long double>(v.size()));
}
#define LOG_CODE_INFO "Location: " << __FILE__ << "@" << __LINE__ << "::" << __FUNCTION__ << std::endl
#define LOG_ERR { pthread_t t = pthread_self(); std::cerr << "--- BEGIN ERROR MSG ---" << std::endl << "Physical: [Node " << args->numa_node << " | Core " << args->core << " | Thread " << t << "]" << std::endl; } std::cerr << LOG_CODE_INFO
#define CHECK_STATUS(status,msg) { if (status != dml::status_code::ok) { LOG_ERR << "Status Code: " << StatusCodeToString(status) << std::endl << #msg << std::endl; args->status = status; return nullptr; }}
// Emits a contextual error header (NUMA node + pthread id from the enclosing
// function's `args`) and then starts a new std::cerr insertion chain with the
// code location, so callers can append further `<< ...` detail.
#define LOG_ERR { pthread_t t = pthread_self(); std::cerr << "--- BEGIN ERROR MSG ---" << std::endl << "Physical: [Node " << args->numa_node << " | Thread " << t << "]" << std::endl; } std::cerr << LOG_CODE_INFO
// On a non-ok dml status: log the status string and the stringized `msg`
// token, record the status in `args`, and abort the thread function.
// NOTE: single '#' (stringize) is required here — '##' (token paste) directly
// after '<<' is ill-formed and fails to preprocess.
#define CHECK_STATUS(status,msg) { if (status != dml::status_code::ok) { LOG_ERR << "Status Code: " << StatusCodeToString(status) << std::endl << #msg << std::endl; args->status = status; return nullptr; }}
template <typename path>
void* thread_function(void* argp) {
TaskData* args = reinterpret_cast<TaskData*>(argp);
std::vector<double> submission_durations;
std::vector<double> completion_durations;
std::vector<double> combined_durations;
std::vector<uint64_t> submission_durations;
std::vector<uint64_t> completion_durations;
std::vector<uint64_t> combined_durations;
// set numa node and core affinity of the current thread
numa_run_on_node(args->numa_node);
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(args->core, &cpuset);
if (pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset) != 0) {
LOG_ERR << "Error setting affinity for thread" << std::endl;
return nullptr;
}
// allocate memory for the move operation on the requested numa nodes
void* src = numa_alloc_onnode(args->size, args->nnode_src);

7
benchmarks/task-data.hpp

@@ -9,7 +9,6 @@
struct TaskData {
// thread placement / engine selection
uint8_t numa_node;
uint8_t core;
// region size and source+destination for move
size_t size;
uint8_t nnode_src;
@@ -38,8 +37,7 @@ inline void to_json(nlohmann::json& j, const TaskData& a) {
j["task"]["batching"]["enabled"] = a.batch_submit;
j["task"]["batching"]["batch_size"] = a.batch_size;
j["task"]["batching"]["barrier_after_n_operations"] = a.barrier_after_n_operations;
j["affinity"]["cpu_core"] = a.core;
j["affinity"]["numa"] = a.numa_node;
j["affinity"]["node"] = a.numa_node;
j["affinity"]["nnode_src"] = a.nnode_src;
j["affinity"]["nnode_dst"] = a.nnode_dst;
j["time"]["unit"] = "microseconds";
@@ -56,8 +54,7 @@ inline void from_json(const nlohmann::json& j, TaskData& a) {
j["task"]["batching"]["enabled"].get_to(a.batch_submit);
j["task"]["batching"]["batch_size"].get_to(a.batch_size);
j["task"]["batching"]["barrier_after_n_operations"].get_to(a.barrier_after_n_operations);
j["affinity"]["numa"].get_to(a.numa_node);
j["affinity"]["core"].get_to(a.core);
j["affinity"]["node"].get_to(a.numa_node);
j["affinity"]["nnode_src"].get_to(a.nnode_src);
j["affinity"]["nnode_dst"].get_to(a.nnode_dst);
}

3
benchmarks/task-description.json

@@ -15,8 +15,7 @@
}
},
"affinity": {
"numa": 0,
"core": 0,
"node": 0,
"nnode_src": 0,
"nnode_dst": 0
}

Loading…
Cancel
Save