
implement batch operation and add control parameters to the ThreadArgs struct; also add more timing information: submission and completion are now timed separately

master
Constantin Fürst committed 1 year ago
commit 7e8c9acbc3
1 file changed: benchmarks/execute-move.hpp (98 changed lines)
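
The commit message above mentions new control parameters in the ThreadArgs struct. As a rough orientation before the diff, here is a minimal, hypothetical sketch of how one ThreadArgs instance could be filled in for the batch path. The field names are taken from the diff below; the helper name make_batch_args, the include path, and all concrete values are illustrative assumptions, not part of the commit.

#include <cstdint>
#include "execute-move.hpp"  // header shown in this commit; include path is an assumption

// hypothetical helper: configure one benchmark thread for batched submission
// (field names from ThreadArgs below; values are example assumptions)
ThreadArgs make_batch_args(uint8_t node, uint8_t core) {
    ThreadArgs args{};
    args.numa_node = node;                // node the worker thread is pinned to
    args.core = core;                     // core used for pthread affinity
    args.size = 1 << 20;                  // bytes copied per operation (1 MiB)
    args.nnode_src = node;                // NUMA node of the source buffer
    args.nnode_dst = node;                // NUMA node of the destination buffer
    args.rep_count = 100;                 // repetitions averaged per thread
    args.batch_submit = true;             // take the dml::sequence batch path
    args.batch_size = 32;                 // copy operations per batch
    args.barrier_after_n_operations = 8;  // insert a dml::nop after every 8 copies
    return args;
}

Setting batch_submit to false would instead exercise the single-operation path, which submits one dml::mem_copy per repetition.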

@@ -10,6 +10,8 @@
 #include <dml/dml.hpp>
+#include "statuscode-tostring.hpp"

 struct ThreadArgs {
     // thread placement / engine selection
     uint8_t numa_node;
@@ -18,24 +20,54 @@ struct ThreadArgs {
     size_t size;
     uint8_t nnode_src;
     uint8_t nnode_dst;
+    // repetition
+    uint32_t rep_count;
+    bool batch_submit;
+    uint32_t batch_size;
+    uint32_t barrier_after_n_operations;
     // thread output
     dml::status_code status;
-    std::chrono::microseconds duration;
+    // average run duration in microseconds
+    double combined_duration;
+    double submit_duration;
+    double complete_duration;
+    // completed iterations
+    uint32_t rep_completed;
     // set by execution
     sem_t* sig;
 };

+double avg(const std::vector<double>& v) {
+    int n = 0;
+    double mean = 0.0;
+    for (const auto x : v) {
+        const double delta = static_cast<double>(x) - mean;
+        mean += delta / ++n;
+    }
+    return mean;
+}

+#define LOG_CODE_INFO "Location: " << __FILE__ << "@" << __LINE__ << "::" << __FUNCTION__ << std::endl
+#define LOG_ERR { pthread_t t = pthread_self(); std::cerr << "--- BEGIN ERROR MSG ---" << std::endl << "Physical: [Node " << args->numa_node << " | Core " << args->core << " | Thread " << t << "]" << std::endl; } std::cerr << LOG_CODE_INFO
+#define CHECK_STATUS(status,msg) { if (status != dml::status_code::ok) { LOG_ERR << "Status Code: " << StatusCodeToString(status) << std::endl << #msg << std::endl; args->status = status; return nullptr; }}

 template <typename path>
 void* thread_function(void* argp) {
     ThreadArgs* args = reinterpret_cast<ThreadArgs*>(argp);
+    std::vector<double> submission_durations;
+    std::vector<double> completion_durations;
+    std::vector<double> combined_durations;

     // set numa node and core affinity of the current thread
     numa_run_on_node(args->numa_node);
     cpu_set_t cpuset;
     CPU_ZERO(&cpuset);
     CPU_SET(args->core, &cpuset);
     if (pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset) != 0) {
-        std::cerr << "Error setting affinity for thread designated to core " << args->core << " on node " << args->numa_node << std::endl;
+        LOG_ERR << "Error setting affinity for thread" << std::endl;
         return nullptr;
     }
@@ -45,31 +77,83 @@ void* thread_function(void* argp) {
     dml::data_view srcv = dml::make_view(reinterpret_cast<uint8_t*>(src), args->size);
     dml::data_view dstv = dml::make_view(reinterpret_cast<uint8_t*>(dst), args->size);
+    args->status = dml::status_code::ok;
+    args->rep_completed = 0;

     // wait for specified signal so that all operations start at the same time
     sem_wait(args->sig);

+    for (uint32_t i = 0; i < args->rep_count; i++) {
+        if (args->batch_submit) {
+            uint32_t opcount = args->batch_size;
+            if (args->barrier_after_n_operations > 0) {
+                opcount += opcount / args->barrier_after_n_operations;
+            }
+            const auto st = std::chrono::high_resolution_clock::now();
+            auto sequence = dml::sequence(opcount, std::allocator<dml::byte_t>());
+            for (uint32_t j = 0; j < args->batch_size; j++) {
+                const auto status = sequence.add(dml::mem_copy, srcv, dstv);
+                if (j % args->barrier_after_n_operations == 0) {
+                    sequence.add(dml::nop);
+                }
+            }
+            auto handler = dml::submit<path>(dml::batch, sequence);
+            const auto se = std::chrono::high_resolution_clock::now();
+            auto result = handler.get();
+            const auto et = std::chrono::high_resolution_clock::now();
+            submission_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(se - st).count());
+            completion_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - se).count());
+            combined_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - st).count());
+        }
+        else {
             const auto st = std::chrono::high_resolution_clock::now();
             // we use the asynchronous submit-routine even though this is not required
-            // here, however the project later on will only use async operation
-            auto handler = dml::submit<path>(dml::mem_move, srcv, dstv);
+            // here, however the project later on will only use async operation and
+            // therefore this behaviour should be benchmarked
+            auto handler = dml::submit<path>(dml::mem_copy, srcv, dstv);
+            const auto se = std::chrono::high_resolution_clock::now();
             auto result = handler.get();
             const auto et = std::chrono::high_resolution_clock::now();
+            const dml::status_code status = result.status;
+            CHECK_STATUS(status, "Operation completed with an Error!");
+            submission_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(se - st).count());
+            completion_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - se).count());
+            combined_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - st).count());
+        }
+        args->rep_completed++;
+    }

     // free the allocated memory regions on the selected nodes
     numa_free(src, args->size);
     numa_free(dst, args->size);

-    args->duration = std::chrono::duration_cast<std::chrono::microseconds>(et - st);
-    args->status = result.status;
+    args->combined_duration = avg(combined_durations);
+    args->complete_duration = avg(completion_durations);
+    args->submit_duration = avg(submission_durations);
     args->sig = nullptr;
     return nullptr;
 }

 template <typename path>
-void execute_mem_move(std::vector<ThreadArgs>& args) {
+void execute_dml_memcpy(std::vector<ThreadArgs>& args) {
     sem_t sem;
     std::vector<pthread_t> threads;
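
For context, the following standalone sketch condenses the batch path measured above onto the software execution path, timing batch construction and submission separately from completion, as one iteration of the loop in thread_function does. It is a sketch under simplifying assumptions: buffers come from std::vector instead of numa_alloc_onnode, there is no thread pinning, and the dml::nop barrier is only added when barrier_after_n_operations is non-zero, an extra guard not present in the committed code.

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

#include <dml/dml.hpp>

// standalone sketch: one batch of mem_copy operations, with submission and
// completion timed separately (mirrors one iteration of the benchmark loop)
int main() {
    constexpr size_t size = 1 << 20;
    constexpr uint32_t batch_size = 32;
    constexpr uint32_t barrier_after_n_operations = 8;

    std::vector<uint8_t> src(size, 0xAB), dst(size, 0x00);
    auto srcv = dml::make_view(src.data(), size);
    auto dstv = dml::make_view(dst.data(), size);

    // reserve extra sequence slots for the nop barrier entries
    uint32_t opcount = batch_size;
    if (barrier_after_n_operations > 0) {
        opcount += opcount / barrier_after_n_operations;
    }

    const auto st = std::chrono::high_resolution_clock::now();

    auto sequence = dml::sequence(opcount, std::allocator<dml::byte_t>());
    for (uint32_t j = 0; j < batch_size; j++) {
        sequence.add(dml::mem_copy, srcv, dstv);
        // assumption: guard the modulo so a zero interval disables the barrier
        if (barrier_after_n_operations > 0 && j % barrier_after_n_operations == 0) {
            sequence.add(dml::nop);
        }
    }
    auto handler = dml::submit<dml::software>(dml::batch, sequence);

    const auto se = std::chrono::high_resolution_clock::now();

    auto result = handler.get();

    const auto et = std::chrono::high_resolution_clock::now();

    const auto submit_us   = std::chrono::duration_cast<std::chrono::microseconds>(se - st).count();
    const auto complete_us = std::chrono::duration_cast<std::chrono::microseconds>(et - se).count();
    std::cout << "submit: " << submit_us << " us, complete: " << complete_us << " us" << std::endl;

    return result.status == dml::status_code::ok ? 0 : 1;
}

Averaging such per-iteration values with the avg() helper from the header is what produces the submit_duration, complete_duration, and combined_duration fields reported per thread.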
