#pragma once

#include <iostream>
#include <vector>
#include <chrono>
#include <numeric>
#include <future>
#include <thread>   // std::thread, std::this_thread::sleep_for
#include <cstring>  // std::memset

#include <pthread.h>
#include <semaphore.h>
#include <numa.h>

#include <dml/dml.hpp>

#include "util/dml-helper.hpp"
#include "util/task-data.hpp"

#define LOG_CODE_INFO "Location: " << __FILE__ << "@" << __LINE__ << "::" << __FUNCTION__ << std::endl
#define LOG_ERR { pthread_t t = pthread_self(); std::cerr << "--- BEGIN ERROR MSG ---" << std::endl << "Physical: [Node " << args->numa_node << " | Thread " << t << "]" << std::endl; } std::cerr << LOG_CODE_INFO
#define CHECK_STATUS(status,msg) { if (status != dml::status_code::ok) { LOG_ERR << "Status Code: " << StatusCodeToString(status) << std::endl << msg << std::endl; args->status = status; return nullptr; }}

std::shared_future<void> LAUNCH_;
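// LAUNCH_ acts as a start gate: every worker thread blocks on it once its
// setup is done and is only released after the benchmark loop has taken the
// start timestamp, so all copies begin at the same time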

// worker thread: copies args->size bytes from nnode_src to nnode_dst through
// the DML execution path given as template parameter
template <typename path>
void* thread_function(void* argp) {
    TaskData* args = reinterpret_cast<TaskData*>(argp);

    // set numa node and core affinity of the current thread
    numa_run_on_node(args->numa_node);

    // allocate memory for the move operation on the requested numa nodes
    void* src = numa_alloc_onnode(args->size, args->nnode_src);
    void* dst = numa_alloc_onnode(args->size, args->nnode_dst);
    dml::data_view srcv = dml::make_view(reinterpret_cast<uint8_t*>(src), args->size);
    dml::data_view dstv = dml::make_view(reinterpret_cast<uint8_t*>(dst), args->size);

    // zero-initialise source and destination
    std::memset(src, 0, args->size);
    std::memset(dst, 0, args->size);

    args->status = dml::status_code::ok;
    args->rep_completed = 0;

    LAUNCH_.wait();

    if (args->batch_size > 1) {
        auto sequence = dml::sequence(args->batch_size, std::allocator<dml::byte_t>());

        for (uint32_t j = 0; j < args->batch_size; j++) {
            // block_on_fault() is required to submit the task in such a way that
            // the DSA engine can handle page faults itself together with the
            // IOMMU, which requires the WQ to be configured to allow this too
            const auto status = sequence.add(dml::mem_copy.block_on_fault(), srcv, dstv);
            CHECK_STATUS(status, "Adding operation to batch failed!");
        }

        // we use the asynchronous submit routine even though it is not required
        // here; the project will later use only asynchronous operation, so this
        // behaviour should be benchmarked
        auto handler = dml::submit<path>(dml::batch, sequence);

        auto result = handler.get();

        const dml::status_code status = result.status;
        CHECK_STATUS(status, "Batch completed with an Error!");
    }
    else {
        // we use the asynchronous submit routine even though it is not required
        // here; the project will later use only asynchronous operation, so this
        // behaviour should be benchmarked
        // block_on_fault() is required to submit the task in such a way that
        // the DSA engine can handle page faults itself together with the
        // IOMMU, which requires the WQ to be configured to allow this too
        auto handler = dml::submit<path>(dml::mem_copy.block_on_fault(), srcv, dstv);

        auto result = handler.get();

        const dml::status_code status = result.status;
        CHECK_STATUS(status, "Operation completed with an Error!");
    }

    // free the allocated memory regions on the selected nodes
    numa_free(src, args->size);
    numa_free(dst, args->size);

    return nullptr;
}
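
// runs `iterations` timed rounds: each round spawns one worker thread per
// TaskData entry, releases them simultaneously through LAUNCH_, and records
// the wall-clock time from release until all threads have joined; returns
// one duration in nanoseconds per round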
template <typename path>
std::vector<uint64_t> execute_dml_memcpy(std::vector<TaskData>& args, const uint64_t iterations) {
    std::vector<std::thread> threads;
    std::vector<uint64_t> timing;

    // initialize numa library
    numa_available();

    for (uint64_t i = 0; i < iterations; i++) {
        std::promise<void> launch_promise;
        LAUNCH_ = launch_promise.get_future();

        // for each submitted task we create one thread, passing a pointer to
        // its TaskData; the thread performs its setup and then blocks on the
        // shared future until the timed launch below
        for (auto& arg : args) {
            threads.emplace_back(thread_function<path>, &arg);
        }

        // give all threads time to finish their setup and reach the start gate
        using namespace std::chrono_literals;
        std::this_thread::sleep_for(1000ms);

        const auto time_start = std::chrono::steady_clock::now();

        launch_promise.set_value();

        for (std::thread& t : threads) { t.join(); }

        const auto time_end = std::chrono::steady_clock::now();

        // discard the joined threads, otherwise the next iteration would try
        // to join them a second time and throw std::system_error
        threads.clear();

        timing.emplace_back(std::chrono::duration_cast<std::chrono::nanoseconds>(time_end - time_start).count());
    }

    return timing;
}
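
/*
    Usage sketch (illustrative only): the exact members of TaskData are defined
    in util/task-data.hpp, so the assignments below merely mirror the fields
    accessed in this file and assume they are publicly writable; dml::hardware
    selects the DSA-backed execution path of the DML library, dml::software the
    CPU fallback.

        std::vector<TaskData> tasks(1);
        tasks[0].size       = 1 << 20;   // copy 1 MiB per task
        tasks[0].numa_node  = 0;         // node the worker thread is pinned to
        tasks[0].nnode_src  = 0;         // node holding the source buffer
        tasks[0].nnode_dst  = 1;         // node holding the destination buffer
        tasks[0].batch_size = 1;         // single descriptor, no batching

        const std::vector<uint64_t> durations_ns = execute_dml_memcpy<dml::hardware>(tasks, 10);
*/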