#pragma once
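// Benchmark worker and launcher for DML/DSA memory copies between NUMA nodes.
// Each worker thread binds itself to a NUMA node, allocates source and
// destination buffers on the requested nodes and measures submission and
// completion times for batched, multi-descriptor and single-descriptor
// submission, as configured through TaskData.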
#include <iostream>
#include <vector>
#include <chrono>
#include <numeric>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <pthread.h>
#include <semaphore.h>
#include <numa.h>
#include <dml/dml.hpp>
#include "barrier.hpp"
#include "statuscode-tostring.hpp"
#include "task-data.hpp"
// arithmetic mean of the measured durations
double avg(const std::vector<uint64_t>& v) {
    return static_cast<double>(std::accumulate(v.begin(), v.end(), uint64_t{0})) / static_cast<double>(v.size());
}

// population standard deviation of the measured durations
double stdev(const std::vector<uint64_t>& v, const double mean) {
    std::vector<double> diff(v.size());
    std::transform(v.begin(), v.end(), diff.begin(), [mean](const double x) { return x - mean; });
    const double sq_sum = std::inner_product(diff.begin(), diff.end(), diff.begin(), 0.0);
    return std::sqrt(sq_sum / static_cast<double>(v.size()));
}

#define LOG_CODE_INFO "Location: " << __FILE__ << "@" << __LINE__ << "::" << __FUNCTION__ << std::endl
#define LOG_ERR { pthread_t t = pthread_self(); std::cerr << "--- BEGIN ERROR MSG ---" << std::endl << "Physical: [Node " << args->numa_node << " | Thread " << t << "]" << std::endl; } std::cerr << LOG_CODE_INFO
#define CHECK_STATUS(status,msg) \
    { if (status != dml::status_code::ok) { \
        LOG_ERR << "Status Code: " << StatusCodeToString(status) << std::endl << msg << std::endl; \
        args->status = status; \
        return nullptr; \
    }}
#define ADD_TIMING_MEASUREMENT \
    { if (i >= 5) { \
        submission_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(se - st).count()); \
        completion_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - se).count()); \
        combined_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - st).count()); \
    }}
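// Every measured iteration records three timestamps: st (before submission),
// se (directly after submission) and et (after completion). The macro above
// derives the submission time (se - st), the completion wait (et - se) and the
// combined duration (et - st) in microseconds, skipping the five warmup runs.
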
template <typename path>
void* thread_function(void* argp) {
    TaskData* args = reinterpret_cast<TaskData*>(argp);

    std::vector<uint64_t> submission_durations;
    std::vector<uint64_t> completion_durations;
    std::vector<uint64_t> combined_durations;

    // bind the current thread to run on the selected numa node
    numa_run_on_node(args->numa_node);

    // allocate memory for the move operation on the requested numa nodes
    void* src = numa_alloc_onnode(args->size, args->nnode_src);
    void* dst = numa_alloc_onnode(args->size, args->nnode_dst);
    dml::data_view srcv = dml::make_view(reinterpret_cast<uint8_t*>(src), args->size);
    dml::data_view dstv = dml::make_view(reinterpret_cast<uint8_t*>(dst), args->size);

    args->status = dml::status_code::ok;
    args->rep_completed = 0;
    // we add 5 as the first 5 iterations will not be measured,
    // which removes exceptional values encountered during warmup
    for (uint32_t i = 0; i < args->rep_count + 5; i++) {
        // synchronize the start of each iteration using the barrier structure
        args->barrier_->wait();
        if (args->batch_submit) {
            const auto st = std::chrono::steady_clock::now();

            auto sequence = dml::sequence(args->batch_size, std::allocator<dml::byte_t>());

            for (uint32_t j = 0; j < args->batch_size; j++) {
                // block_on_fault() is required to submit the task in a way so that the
                // DSA engine can handle page faults itself together with the IOMMU,
                // which requires the WQ to be configured to allow this too
                const auto status = sequence.add(dml::mem_copy.block_on_fault(), srcv, dstv);
                CHECK_STATUS(status, "Adding operation to batch failed!");
            }

            // we use the asynchronous submit routine even though it is not required
            // here; the project will later rely on asynchronous operation only, so
            // this behaviour should be benchmarked
            auto handler = dml::submit<path>(dml::batch, sequence);

            const auto se = std::chrono::steady_clock::now();

            auto result = handler.get();

            const auto et = std::chrono::steady_clock::now();

            const dml::status_code status = result.status;
            CHECK_STATUS(status, "Batch completed with an Error!");
            ADD_TIMING_MEASUREMENT;
        } else if (args->batch_size > 1) {
            // implementation of the non-batched multi-submit path follows here:
            // we submit a bunch of work as single descriptors but do not wait
            // for their completion immediately
            std::vector<dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>>> handlers;

            const auto st = std::chrono::steady_clock::now();

            for (uint32_t j = 0; j < args->batch_size; j++) {
                // block_on_fault() is required to submit the task in a way so that the
                // DSA engine can handle page faults itself together with the IOMMU,
                // which requires the WQ to be configured to allow this too
                handlers.emplace_back(dml::submit<path>(dml::mem_copy.block_on_fault(), srcv, dstv));
            }

            const auto se = std::chrono::steady_clock::now();

            for (auto& handler : handlers) {
                auto result = handler.get();
                const dml::status_code status = result.status;
                CHECK_STATUS(status, "Operation completed with an Error!");
            }

            const auto et = std::chrono::steady_clock::now();
            ADD_TIMING_MEASUREMENT;
        } else {
            const auto st = std::chrono::steady_clock::now();

            // we use the asynchronous submit routine even though it is not required
            // here; the project will later rely on asynchronous operation only, so
            // this behaviour should be benchmarked
            // block_on_fault() is required to submit the task in a way so that the
            // DSA engine can handle page faults itself together with the IOMMU,
            // which requires the WQ to be configured to allow this too
            auto handler = dml::submit<path>(dml::mem_copy.block_on_fault(), srcv, dstv);

            const auto se = std::chrono::steady_clock::now();

            auto result = handler.get();

            const auto et = std::chrono::steady_clock::now();

            const dml::status_code status = result.status;
            CHECK_STATUS(status, "Operation completed with an Error!");

            ADD_TIMING_MEASUREMENT;
        }
        // again: we do not count the first 5 repetitions
        if (i >= 5) args->rep_completed++;
    }

    // free the allocated memory regions on the selected nodes
    numa_free(src, args->size);
    numa_free(dst, args->size);

    args->combined_duration = avg(combined_durations);
    args->complete_duration = avg(completion_durations);
    args->submit_duration = avg(submission_durations);
    args->combined_duration_stdev = stdev(combined_durations, args->combined_duration);
    args->complete_duration_stdev = stdev(completion_durations, args->complete_duration);
    args->submit_duration_stdev = stdev(submission_durations, args->submit_duration);

    return nullptr;
}
template <typename path>
void execute_dml_memcpy(std::vector<TaskData>& args) {
    barrier task_barrier(args.size());
    std::vector<pthread_t> threads;

    // initialize the numa library and make sure numa support is available
    if (numa_available() < 0) {
        std::cerr << "NUMA is not available on this system" << std::endl;
        exit(1);
    }

    // for each submitted task we link the shared barrier
    // and create the thread, passing the task data as argument
    for (auto& arg : args) {
        arg.barrier_ = &task_barrier;
        threads.emplace_back();
        if (pthread_create(&threads.back(), nullptr, thread_function<path>, &arg) != 0) {
            std::cerr << "Error creating thread" << std::endl;
            exit(1);
        }
    }

    for (pthread_t& t : threads) {
        pthread_join(t, nullptr);
    }
}
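
// Example usage (a minimal sketch, not part of this header): the field names
// below are the ones this file reads from TaskData; see task-data.hpp for the
// full definition. dml::hardware selects the DSA hardware execution path of
// the DML library, dml::software the CPU fallback.
//
//   std::vector<TaskData> tasks(1);
//   tasks[0].numa_node    = 0;        // node the worker thread is bound to
//   tasks[0].nnode_src    = 0;        // node of the source buffer
//   tasks[0].nnode_dst    = 1;        // node of the destination buffer
//   tasks[0].size         = 1 << 20;  // copy size in bytes
//   tasks[0].rep_count    = 100;      // measured repetitions (plus 5 warmup runs)
//   tasks[0].batch_size   = 1;        // descriptors per iteration
//   tasks[0].batch_submit = false;    // single-descriptor path
//   execute_dml_memcpy<dml::hardware>(tasks);
//   std::cout << tasks[0].combined_duration << std::endl;  // average in microseconds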