This repository contains my bachelor's thesis and associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with the Intel Data Streaming Accelerator
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

117 lines
3.2 KiB

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <limits>
#include <memory>
#include <random>
#include <vector>

#include "cache.hpp"
// global cache instance shared by all accesses in this example;
// its placement and copy policies are installed in main() via Init()
dsacache::Cache CACHE;
// Allocates a size-element array of doubles and fills it with uniformly
// distributed values. The engine is default-seeded, so the sequence is
// deterministic across calls. The caller owns the buffer (delete[]).
double* GetRandomArray(const size_t size) {
double* array = new double[size];
// BUG FIX: std::numeric_limits<double>::min() is the smallest *positive*
// normal double, not the most negative value, so the original range
// [min(), max()] never produced negative numbers. Use [lowest()/2, max()/2]
// instead: it covers both signs and keeps (b - a) <= max(), which is a
// precondition of std::uniform_real_distribution (otherwise UB).
constexpr double range_lo = std::numeric_limits<double>::lowest() / 2.0;
constexpr double range_hi = std::numeric_limits<double>::max() / 2.0;
std::uniform_real_distribution<double> unif(range_lo, range_hi);
std::default_random_engine re;
for (size_t i = 0; i < size; i++) {
array[i] = unif(re);
}
return array;
}
// Element-wise comparison of two size-element arrays.
// Returns true iff every pair of elements compares equal; an empty range
// is trivially equal. Note: NaN entries never compare equal to anything.
bool IsEqual(const double* a, const double* b, const size_t size) {
// Robustness: guard null inputs instead of dereferencing them.
// (Two null pointers are only "equal" for an empty range.)
if (a == nullptr || b == nullptr) {
return size == 0;
}
// BUG FIX: the original wrapped a[i] != b[i] in try/catch, but indexing a
// raw pointer never throws -- an out-of-bounds access is undefined
// behavior, not an exception -- so the handler was dead code that only
// obscured the loop. std::equal expresses the intent directly.
return std::equal(a, a + size, b);
}
// Requests src through the global cache, reports where the data lives both
// immediately after the request and once the asynchronous copy finished,
// then verifies that the cached copy matches the source element-wise.
void PerformAccessAndTest(double* src, const size_t size) {
// hand the source region to the cache; the copy may run asynchronously
std::unique_ptr<dsacache::CacheData> entry = CACHE.Access(
reinterpret_cast<uint8_t *>(src),
size * sizeof(double)
);
// classifies a reported data location relative to src and prints the
// matching message; check order (src-equality before nullptr) mirrors
// the original control flow exactly
const auto report_location = [src](const double* location, const char* same_msg,
const char* null_msg, const char* diff_msg) {
if (location == src) {
std::cout << same_msg << std::endl;
}
else if (location == nullptr) {
std::cout << null_msg << std::endl;
}
else {
std::cout << diff_msg << std::endl;
}
};
// check the value immediately just to see if ram or cache was returned
report_location(
reinterpret_cast<double *>(entry->GetDataLocation()),
"Caching did not immediately yield different data location.",
"Immediately got nullptr.",
"Immediately got different data location."
);
// block until the asynchronous caching operation has completed
entry->WaitOnCompletion();
// re-read the cache-data-location now that the copy is done
double* cached = reinterpret_cast<double *>(entry->GetDataLocation());
report_location(
cached,
"Caching did not affect data location.",
"Got nullptr from cache.",
"Got different data location from cache."
);
// verify the cached copy matches the source data
if (IsEqual(src,cached,size)) {
std::cout << "Cached data is correct." << std::endl;
}
else {
std::cout << "Cached data is wrong." << std::endl;
}
}
// Example driver: initializes the global cache with trivial policies,
// generates deterministic test data, and accesses it twice to show the
// behavior of a cold and a warm cache access.
int main(int argc, char **argv) {
// given numa destination and source node and the size of the data
// this function decides on which node the data will be placed,
// which is used to select the HBM-node for the dst-node if desired
auto cache_policy = [](const int numa_dst_node, const int numa_src_node, const size_t data_size) {
return numa_dst_node;
};
// this function receives the memory source and destination node
// and then decides on which nodes the copy operation will be split
auto copy_policy = [](const int numa_dst_node, const int numa_src_node) {
return std::vector{ numa_src_node, numa_dst_node };
};
// initializes the cache with the two policies
CACHE.Init(cache_policy,copy_policy);
// generate the test data (element count, not bytes)
static constexpr size_t data_size = 1024 * 1024;
double* data = GetRandomArray(data_size);
std::cout << "--- first access --- " << std::endl;
PerformAccessAndTest(data, data_size);
std::cout << "--- second access --- " << std::endl;
PerformAccessAndTest(data, data_size);
std::cout << "--- end of application --- " << std::endl;
// BUG FIX: GetRandomArray transfers ownership of the buffer to the
// caller; the original leaked it. Release it before exit.
delete[] data;
return 0;
}