This repository contains my bachelor's thesis, the associated TeX sources, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator

#pragma once

#include <iostream>
#include <vector>
#include <chrono>
#include <cstdint>
#include <cstddef>

#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <numa.h>

#include <dml/dml.hpp>

#include "statuscode-tostring.hpp"
struct ThreadArgs {
    // thread placement / engine selection
    uint8_t numa_node;
    uint8_t core;
    // region size and source + destination numa nodes for the move
    size_t size;
    uint8_t nnode_src;
    uint8_t nnode_dst;
    // repetition
    uint32_t rep_count;
    bool batch_submit;
    uint32_t batch_size;
    uint32_t barrier_after_n_operations;
    // thread output
    dml::status_code status;
    // average run durations in microseconds
    double combined_duration;
    double submit_duration;
    double complete_duration;
    // completed iterations
    uint32_t rep_completed;
    // set by execution
    sem_t* sig;
};
// numerically stable running mean (Welford) which avoids
// accumulating a large sum before dividing
double avg(const std::vector<double>& v) {
    int n = 0;
    double mean = 0.0;
    for (const double x : v) {
        const double delta = x - mean;
        mean += delta / ++n;
    }
    return mean;
}
#define LOG_CODE_INFO "Location: " << __FILE__ << "@" << __LINE__ << "::" << __FUNCTION__ << std::endl
#define LOG_ERR \
    { \
        pthread_t t = pthread_self(); \
        std::cerr << "--- BEGIN ERROR MSG ---" << std::endl \
                  << "Physical: [Node " << args->numa_node << " | Core " << args->core << " | Thread " << t << "]" << std::endl; \
    } \
    std::cerr << LOG_CODE_INFO
#define CHECK_STATUS(status, msg) \
    { \
        if (status != dml::status_code::ok) { \
            LOG_ERR << "Status Code: " << StatusCodeToString(status) << std::endl << msg << std::endl; \
            args->status = status; \
            return nullptr; \
        } \
    }
template <typename path>
void* thread_function(void* argp) {
    ThreadArgs* args = reinterpret_cast<ThreadArgs*>(argp);

    std::vector<double> submission_durations;
    std::vector<double> completion_durations;
    std::vector<double> combined_durations;

    // set numa node and core affinity of the current thread
    numa_run_on_node(args->numa_node);

    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(args->core, &cpuset);
    if (pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset) != 0) {
        LOG_ERR << "Error setting affinity for thread" << std::endl;
        return nullptr;
    }

    // allocate memory for the move operation on the requested numa nodes;
    // numa_alloc_onnode returns NULL on failure
    void* src = numa_alloc_onnode(args->size, args->nnode_src);
    void* dst = numa_alloc_onnode(args->size, args->nnode_dst);
    if (src == nullptr || dst == nullptr) {
        LOG_ERR << "Error allocating memory on the requested numa nodes" << std::endl;
        return nullptr;
    }

    dml::data_view srcv = dml::make_view(reinterpret_cast<uint8_t*>(src), args->size);
    dml::data_view dstv = dml::make_view(reinterpret_cast<uint8_t*>(dst), args->size);

    args->status = dml::status_code::ok;
    args->rep_completed = 0;

    // wait for the shared signal so that all threads start at the same time
    sem_wait(args->sig);
    for (uint32_t i = 0; i < args->rep_count; i++) {
        if (args->batch_submit) {
            // one nop is inserted after every barrier_after_n_operations
            // copies, so reserve space for them in the sequence
            uint32_t opcount = args->batch_size;
            if (args->barrier_after_n_operations > 0) {
                opcount += opcount / args->barrier_after_n_operations;
            }

            const auto st = std::chrono::high_resolution_clock::now();

            auto sequence = dml::sequence(opcount, std::allocator<dml::byte_t>());
            for (uint32_t j = 0; j < args->batch_size; j++) {
                const auto status = sequence.add(dml::mem_copy, srcv, dstv);
                CHECK_STATUS(status, "Adding operation to batch failed!");
                // guard against a zero divisor and add the nop after every
                // n-th copy, matching the space reserved in opcount
                if (args->barrier_after_n_operations > 0 && (j + 1) % args->barrier_after_n_operations == 0) {
                    sequence.add(dml::nop);
                }
            }

            auto handler = dml::submit<path>(dml::batch, sequence);

            const auto se = std::chrono::high_resolution_clock::now();

            auto result = handler.get();

            const auto et = std::chrono::high_resolution_clock::now();

            const dml::status_code status = result.status;
            CHECK_STATUS(status, "Batch completed with an Error!");

            submission_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(se - st).count());
            completion_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - se).count());
            combined_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - st).count());
        }
        else {
            const auto st = std::chrono::high_resolution_clock::now();

            // the asynchronous submit-routine is used even though it is not
            // required here; the project will later rely exclusively on
            // async operation, so this is the behaviour to benchmark
            auto handler = dml::submit<path>(dml::mem_copy, srcv, dstv);

            const auto se = std::chrono::high_resolution_clock::now();

            auto result = handler.get();

            const auto et = std::chrono::high_resolution_clock::now();

            const dml::status_code status = result.status;
            CHECK_STATUS(status, "Operation completed with an Error!");

            submission_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(se - st).count());
            completion_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - se).count());
            combined_durations.emplace_back(std::chrono::duration_cast<std::chrono::microseconds>(et - st).count());
        }

        args->rep_completed++;
    }
    // free the allocated memory regions on the selected nodes
    numa_free(src, args->size);
    numa_free(dst, args->size);

    args->combined_duration = avg(combined_durations);
    args->complete_duration = avg(completion_durations);
    args->submit_duration = avg(submission_durations);
    args->sig = nullptr;

    return nullptr;
}
template <typename path>
void execute_dml_memcpy(std::vector<ThreadArgs>& args) {
    sem_t sem;
    std::vector<pthread_t> threads;

    // initialize the numa library and the start-signal semaphore
    if (numa_available() < 0) {
        std::cerr << "NUMA is not available on this system" << std::endl;
        exit(1);
    }
    sem_init(&sem, 0, 0);

    // for each submitted task we link the start semaphore
    // and create the thread, passing its argument struct
    for (auto& arg : args) {
        arg.sig = &sem;
        threads.emplace_back();
        if (pthread_create(&threads.back(), nullptr, thread_function<path>, &arg) != 0) {
            std::cerr << "Error creating thread" << std::endl;
            exit(1);
        }
    }

    // post once per thread: a single sem_post only wakes one waiter,
    // so every created thread needs its own increment to be released
    for (size_t i = 0; i < args.size(); i++) {
        sem_post(&sem);
    }

    for (pthread_t& t : threads) {
        pthread_join(t, nullptr);
    }

    sem_destroy(&sem);
}
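
For reference, a minimal usage sketch follows. This is an assumption-laden example, not part of the benchmark itself: the header above is assumed to be saved as "benchmark.hpp" (a hypothetical name), the node/core/size values are placeholders, and dml::hardware can be swapped for dml::software or dml::automatic, the other execution paths offered by the DML high-level API.

// minimal usage sketch, assuming the header above is saved as
// "benchmark.hpp" (hypothetical file name); all values are placeholders
#include "benchmark.hpp"

int main() {
    std::vector<ThreadArgs> args(2);
    for (size_t i = 0; i < args.size(); i++) {
        args[i].numa_node = 0;
        args[i].core = static_cast<uint8_t>(i);
        args[i].size = 1 << 20;  // copy 1 MiB per repetition
        args[i].nnode_src = 0;
        args[i].nnode_dst = 0;
        args[i].rep_count = 100;
        args[i].batch_submit = false;
        args[i].batch_size = 0;
        args[i].barrier_after_n_operations = 0;
    }

    // dml::hardware selects the DSA engine; dml::software would run
    // the same benchmark on the CPU path of the DML library
    execute_dml_memcpy<dml::hardware>(args);

    for (const ThreadArgs& a : args) {
        std::cout << "avg combined: " << a.combined_duration << " us, "
                  << "avg submit: " << a.submit_duration << " us, "
                  << "avg complete: " << a.complete_duration << " us"
                  << std::endl;
    }
    return 0;
}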