@@ -14,62 +14,22 @@
 #include "../../offloading-cacher/cache.hpp"
 
-#include "BenchmarkHelpers.cpp"
-
-#define MODE_PREFETCH
-
-////////////////////////////////
-/// BENCHMARK SETUP
-
-constexpr size_t WL_SIZE_B = 4_GiB;
-constexpr uint32_t WARMUP_ITERATION_COUNT = 5;
-constexpr uint32_t ITERATION_COUNT = 5;
-
-#ifdef MODE_PREFETCH
-constexpr uint32_t GROUP_COUNT = 16;
-constexpr size_t CHUNK_SIZE_B = 8_MiB;
-constexpr uint32_t TC_SCANA = 2;
-constexpr uint32_t TC_SCANB = 1;
-constexpr uint32_t TC_AGGRJ = 2;
-constexpr bool PERFORM_CACHING = true;
-constexpr bool DATA_IN_HBM = false;
-constexpr char MODE_STRING[] = "prefetch";
-#endif
-
-#ifdef MODE_DRAM
-constexpr size_t CHUNK_SIZE_B = 2_MiB;
-constexpr uint32_t GROUP_COUNT = 8;
-constexpr uint32_t TC_SCANA = 2;
-constexpr uint32_t TC_SCANB = 0;
-constexpr uint32_t TC_AGGRJ = 1;
-constexpr bool PERFORM_CACHING = false;
-constexpr bool DATA_IN_HBM = false;
-constexpr char MODE_STRING[] = "dram";
-#endif
-
-#ifdef MODE_HBM
-constexpr size_t CHUNK_SIZE_B = 2_MiB;
-constexpr uint32_t GROUP_COUNT = 8;
-constexpr uint32_t TC_SCANA = 2;
-constexpr uint32_t TC_SCANB = 0;
-constexpr uint32_t TC_AGGRJ = 1;
-constexpr bool PERFORM_CACHING = false;
-constexpr bool DATA_IN_HBM = true;
-constexpr char MODE_STRING[] = "hbm";
-#endif
-
-/// DO NOT CONFIGURE BEYOND THIS
-////////////////////////////////
+#ifndef MODE_SET_BY_CMAKE
+#define MODE_COMPLEX_PREFETCH
+#endif
+
+#include "BenchmarkModes.hpp"
+#include "BenchmarkHelpers.cpp"
 
 constexpr uint64_t CMP_A = 50;
+constexpr uint64_t CMP_B = 42;
 constexpr uint32_t TC_COMBINED = TC_SCANA + TC_SCANB + TC_AGGRJ;
 constexpr size_t WL_SIZE_ELEMENTS = WL_SIZE_B / sizeof(uint64_t);
 constexpr size_t CHUNK_COUNT = WL_SIZE_B / CHUNK_SIZE_B;
 constexpr size_t CHUNK_SIZE_ELEMENTS = CHUNK_SIZE_B / sizeof(uint64_t);
 constexpr size_t RUN_COUNT = CHUNK_COUNT / GROUP_COUNT;
+constexpr size_t MASK_ELEMENT_SIZE = 16;
+constexpr size_t MASK_STEP_SIZE = CHUNK_SIZE_ELEMENTS / MASK_ELEMENT_SIZE;
 
 static_assert(RUN_COUNT > 0);
+static_assert(TC_SCANB <= TC_AGGRJ);
 static_assert(WL_SIZE_B % 16 == 0);
 static_assert(CHUNK_SIZE_B % 16 == 0);
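Note: the per-mode tuning constants now live in BenchmarkModes.hpp, so the derived sizes above are worth a quick sanity check by hand. Using the removed MODE_PREFETCH values (WL_SIZE_B = 4_GiB, CHUNK_SIZE_B = 8_MiB, GROUP_COUNT = 16) purely for illustration:

    // Worked example, assuming the removed MODE_PREFETCH configuration:
    // WL_SIZE_ELEMENTS    = 4 GiB / sizeof(uint64_t) = 536'870'912
    // CHUNK_COUNT         = 4 GiB / 8 MiB            = 512
    // CHUNK_SIZE_ELEMENTS = 8 MiB / sizeof(uint64_t) = 1'048'576
    // RUN_COUNT           = 512 / 16                 = 32 chunks per group
    // MASK_STEP_SIZE      = 1'048'576 / 16           = 65'536 mask words per chunk
    // (each uint16_t mask word presumably covers MASK_ELEMENT_SIZE = 16 elements, one bit per element)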
@@ -96,6 +56,7 @@ std::shared_future<void> LAUNCH_;
 uint64_t* DATA_A_;
 uint64_t* DATA_B_;
 uint16_t* MASK_A_;
+uint16_t* MASK_B_;
 uint64_t* DATA_DST_;
 
 inline uint64_t get_chunk_index(const size_t gid, const size_t rid) {
@@ -179,11 +140,24 @@ void process_timings(
     }
 }
 
+// if more b than j -> perform b normal, subsplit j
+// if more j than b -> subsplit b like it is now
 void scan_b(size_t gid, size_t tid) {
-    constexpr size_t split = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
-
-    const size_t start = tid * split;
-    const size_t end = start + split;
+    constexpr bool SUBSPLIT_SCANB = TC_AGGRJ > TC_SCANB;
+    constexpr size_t SUBCHUNK_SIZE_ELEMENTS_SCANB = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
+    constexpr uint32_t TC_SUBSPLIT_SCANB = SUBSPLIT_SCANB ? TC_SCANB : TC_AGGRJ;
+
+    size_t start, end;
+
+    if constexpr (SUBSPLIT_SCANB) {
+        start = tid * SUBCHUNK_SIZE_ELEMENTS_SCANB;
+        end = start + SUBCHUNK_SIZE_ELEMENTS_SCANB;
+    }
+    else {
+        start = 0;
+        end = RUN_COUNT;
+    }
 
     THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid].clear();
     THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid].resize(1);
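Note: the subsplit replaces the old unconditional split. Taking the removed prefetch thread counts (TC_SCANB = 1, TC_AGGRJ = 2) as an example, SUBSPLIT_SCANB is true and the single scan_b thread walks both subchunk slots that the two aggr_j threads will later consume; when TC_AGGRJ <= TC_SCANB, the else branch falls back to walking all RUN_COUNT runs. The index arithmetic under those example values:

    // Sketch, assuming TC_SCANB = 1 and TC_AGGRJ = 2:
    // SUBSPLIT_SCANB               = (2 > 1) -> true
    // SUBCHUNK_SIZE_ELEMENTS_SCANB = 2 / 1   -> 2
    // tid 0: start = 0 * 2 = 0, end = 0 + 2 = 2  -> covers subchunks 0 and 1
    // (despite its name, the constant appears to count subchunks, not elements)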
@@ -194,11 +168,20 @@ void scan_b(size_t gid, size_t tid) {
     if constexpr (PERFORM_CACHING) {
         for (size_t i = start; i < end; i++) {
+            const size_t chunk_index = get_chunk_index(gid, 0);
+            uint64_t* chunk_ptr = get_chunk<TC_SUBSPLIT_SCANB>(DATA_B_, chunk_index, i);
+            CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B / TC_SUBSPLIT_SCANB);
+        }
+    }
+
+    if constexpr (COMPLEX_QUERY) {
+        for (size_t i = start; i < end; i++) {
             const size_t chunk_index = get_chunk_index(gid, 0);
-            uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, i);
-            CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B / TC_AGGRJ);
+            uint64_t* chunk_ptr = get_chunk<TC_SUBSPLIT_SCANB>(DATA_B_, chunk_index, i);
+            uint16_t* mask_ptr = get_mask<TC_SUBSPLIT_SCANB>(MASK_B_, chunk_index, i);
+            filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_B, CHUNK_SIZE_B / TC_SUBSPLIT_SCANB);
         }
     }
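Note: filter::apply_same is defined outside this diff; from the call sites it takes the destination mask, an optional secondary destination (nullptr here), the source chunk, the comparison constant, and a size in bytes. A scalar stand-in for that contract, where the less-than predicate and the one-bit-per-element mask layout are assumptions:

    #include <cstdint>
    #include <cstddef>

    // Hypothetical scalar equivalent of filter::apply_same (the real one is vectorized).
    // Assumes elements % 16 == 0, which the static_asserts on the sizes enforce.
    void apply_same_sketch(uint16_t* mask, const uint64_t* data, uint64_t cmp, size_t size_b) {
        const size_t elements = size_b / sizeof(uint64_t);
        for (size_t i = 0; i < elements; i += 16) {
            uint16_t word = 0;
            for (size_t j = 0; j < 16; ++j) {
                if (data[i + j] < cmp) word |= static_cast<uint16_t>(1u << j); // one bit per element
            }
            mask[i / 16] = word;
        }
    }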
@@ -217,13 +200,11 @@ void scan_a(size_t gid, size_t tid) {
     THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
 
     for (size_t i = 0; i < RUN_COUNT; i++) {
         const size_t chunk_index = get_chunk_index(gid, i);
         uint64_t* chunk_ptr = get_chunk<TC_SCANA>(DATA_A_, chunk_index, tid);
         uint16_t* mask_ptr = get_mask<TC_SCANA>(MASK_A_, chunk_index, tid);
 
         filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_A, CHUNK_SIZE_B / TC_SCANA);
     }
 
     THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
@@ -233,31 +214,47 @@ void scan_a(size_t gid, size_t tid) {
 }
 
 void aggr_j(size_t gid, size_t tid) {
+    constexpr bool SUBSPLIT_AGGRJ = TC_SCANB > TC_AGGRJ;
+    constexpr size_t SUBCHUNK_SIZE_ELEMENTS_AGGRJ = TC_SCANB / TC_AGGRJ;
+    constexpr uint32_t TC_SUBSPLIT_AGGRJ = SUBSPLIT_AGGRJ ? TC_AGGRJ : TC_SCANB;
+
+    size_t start, end;
+
+    if constexpr (SUBSPLIT_AGGRJ) {
+        start = tid * SUBCHUNK_SIZE_ELEMENTS_AGGRJ;
+        end = start + SUBCHUNK_SIZE_ELEMENTS_AGGRJ;
+    }
+    else {
+        start = 0;
+        end = RUN_COUNT;
+    }
+
     CACHE_HITS_[gid * tid] = 0;
 
     THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid].clear();
     THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid].resize(1);
 
-    LAUNCH_.wait();
-
     __m512i aggregator = aggregation::OP::zero();
 
+    LAUNCH_.wait();
+
     THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
 
     BARRIERS_[gid]->arrive_and_wait();
 
     THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
 
-    for (size_t i = 0; i < RUN_COUNT; i++) {
+    for (size_t i = start; i < end; i++) {
         const size_t chunk_index = get_chunk_index(gid, i);
-        uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
-        uint16_t* mask_ptr = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
+        uint64_t* chunk_ptr = get_chunk<TC_SUBSPLIT_AGGRJ>(DATA_B_, chunk_index, tid);
+        uint16_t* mask_ptr_a = get_mask<TC_SUBSPLIT_AGGRJ>(MASK_A_, chunk_index, tid);
+        uint16_t* mask_ptr_b = get_mask<TC_SUBSPLIT_AGGRJ>(MASK_B_, chunk_index, tid);
 
         std::unique_ptr<dsacache::CacheData> data;
         uint64_t* data_ptr;
 
         if constexpr (PERFORM_CACHING) {
-            data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), CHUNK_SIZE_B / TC_AGGRJ);
+            data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), CHUNK_SIZE_B / TC_SUBSPLIT_AGGRJ);
            data->WaitOnCompletion(dsacache::WAIT_WEAK);
            data_ptr = reinterpret_cast<uint64_t*>(data->GetDataLocation());
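Note: the caching path follows a fixed handle protocol: CACHE_.Access() returns a std::unique_ptr<dsacache::CacheData>, WaitOnCompletion(dsacache::WAIT_WEAK) waits on the offloaded copy (the WEAK flag presumably relaxes that wait), and GetDataLocation() yields the pointer to read. Isolated, with identifiers taken from the loop above; the nullptr miss fallback is an assumption:

    // Usage pattern as seen above; the nullptr fallback is an assumption.
    std::unique_ptr<dsacache::CacheData> data =
        CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B / TC_SUBSPLIT_AGGRJ);
    data->WaitOnCompletion(dsacache::WAIT_WEAK);
    uint64_t* data_ptr = reinterpret_cast<uint64_t*>(data->GetDataLocation());
    if (data_ptr == nullptr) {
        data_ptr = chunk_ptr; // fall back to reading the source chunk directly
    }
    // keeping the unique_ptr alive presumably pins the cache entry for this iteration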
@@ -273,7 +270,13 @@ void aggr_j(size_t gid, size_t tid) {
         }
 
         uint64_t tmp = _mm512_reduce_add_epi64(aggregator);
-        aggregator = aggregation::apply_masked(aggregator, data_ptr, mask_ptr, CHUNK_SIZE_B / TC_AGGRJ);
+        if constexpr (COMPLEX_QUERY) {
+            aggregator = aggregation::apply_masked(aggregator, chunk_ptr, mask_ptr_a, mask_ptr_b, CHUNK_SIZE_B / TC_SUBSPLIT_AGGRJ);
+        }
+        else {
+            aggregator = aggregation::apply_masked(aggregator, data_ptr, mask_ptr_a, CHUNK_SIZE_B / TC_SUBSPLIT_AGGRJ);
+        }
     }
 
     THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
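Note: the complex branch feeds both filter masks into apply_masked and reads chunk_ptr directly rather than the possibly cached data_ptr, which is worth double-checking in review. The two-mask overload itself is not shown here; a scalar stand-in, where the bitwise-AND combination of the masks is an assumption:

    #include <cstdint>
    #include <cstddef>

    // Hypothetical scalar equivalent of the two-mask apply_masked overload.
    uint64_t apply_masked_sketch(uint64_t acc, const uint64_t* data,
                                 const uint16_t* mask_a, const uint16_t* mask_b, size_t size_b) {
        const size_t elements = size_b / sizeof(uint64_t);
        for (size_t i = 0; i < elements; ++i) {
            const uint16_t word = mask_a[i / 16] & mask_b[i / 16]; // assumed AND-combination
            if (word & (1u << (i % 16))) acc += data[i];
        }
        return acc;
    }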
@@ -297,17 +300,23 @@ int main() {
     fout << "run;rt-ns;rt-s;result[0];scana-run;scana-wait;scanb-run;scanb-wait;aggrj-run;aggrj-wait;cache-hr;" << std::endl;
 
-    if constexpr (DATA_IN_HBM) {
-        DATA_A_ = (uint64_t*) numa_alloc_onnode(WL_SIZE_B, cache_node);
-        DATA_B_ = (uint64_t*) numa_alloc_onnode(WL_SIZE_B, cache_node);
+    // a is always allocated in DRAM
+    DATA_A_ = (uint64_t*) numa_alloc_local(WL_SIZE_B);
+
+    // resulting masks for a and b and total result will always reside in HBM
     MASK_A_ = (uint16_t*) numa_alloc_onnode(WL_SIZE_ELEMENTS, cache_node);
+    MASK_B_ = (uint16_t*) numa_alloc_onnode(WL_SIZE_ELEMENTS, cache_node);
     DATA_DST_ = (uint64_t*) numa_alloc_onnode(TC_AGGRJ * GROUP_COUNT * sizeof(uint64_t), cache_node);
+
+    // location of b depends on configuration
+    if constexpr (STORE_B_IN_HBM) {
+        DATA_B_ = (uint64_t*) numa_alloc_onnode(WL_SIZE_B, cache_node);
     }
     else {
-        DATA_A_ = (uint64_t*) numa_alloc_local(WL_SIZE_B);
         DATA_B_ = (uint64_t*) numa_alloc_local(WL_SIZE_B);
-        MASK_A_ = (uint16_t*) numa_alloc_local(WL_SIZE_ELEMENTS);
-        DATA_DST_ = (uint64_t*) numa_alloc_local(TC_AGGRJ * GROUP_COUNT * sizeof(uint64_t));
     }
 
     if constexpr (PERFORM_CACHING) {
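Note: the allocation block is plain libnuma: numa_alloc_onnode() binds pages to an explicit node (here cache_node, the HBM node), numa_alloc_local() to the calling thread's current node, and both are released with numa_free(). A minimal self-contained sketch with an illustrative node id:

    #include <numa.h>
    #include <cstddef>

    int main() {
        const size_t len = 1 << 20;
        void* on_hbm = numa_alloc_onnode(len, /*node=*/8); // pages bound to node 8 (illustrative)
        void* local  = numa_alloc_local(len);              // pages on the caller's node
        // ... use the buffers ...
        numa_free(on_hbm, len);
        numa_free(local, len);
        return 0;
    }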
@@ -315,7 +324,7 @@ int main() {
     }
 
     fill_mt<uint64_t>(DATA_A_, WL_SIZE_B, 0, 100, 42);
-    fill_mt<uint64_t>(DATA_A_, WL_SIZE_B, 0, 100, 420);
+    fill_mt<uint64_t>(DATA_B_, WL_SIZE_B, 0, 100, 420);
 
     for (uint32_t i = 0; i < ITERATION_COUNT + WARMUP_ITERATION_COUNT; i++) {
         std::promise<void> launch_promise;
@@ -353,6 +362,11 @@ int main() {
         const auto time_end = std::chrono::steady_clock::now();
 
+        const uint64_t result_actual = DATA_DST_[0];
+        const uint64_t result_expected = COMPLEX_QUERY ? sum_check_complex(CMP_A, CMP_B, DATA_A_, DATA_B_, WL_SIZE_B) : sum_check(CMP_A, DATA_A_, DATA_B_, WL_SIZE_B);
+
+        std::cout << "Result Expected: " << result_expected << ", Result Actual: " << result_actual << std::endl;
+
         if (i >= WARMUP_ITERATION_COUNT) {
             uint64_t scana_run = 0, scana_wait = 0, scanb_run = 0, scanb_wait = 0, aggrj_run = 0, aggrj_wait = 0;
             process_timings(&scana_run, &scana_wait, &scanb_run, &scanb_wait, &aggrj_run, &aggrj_wait);
@@ -364,7 +378,7 @@ int main() {
             fout
                 << i - WARMUP_ITERATION_COUNT << ";"
                 << nanos << ";" << seconds << ";"
-                << std::hex << DATA_DST_[0] << std::dec << ";"
+                << DATA_DST_[0] << ";"
                 << scana_run << ";" << scana_wait << ";" << scanb_run << ";" << scanb_wait << ";" << aggrj_run << ";" << aggrj_wait << ";"
                 << process_cache_hitrate() << ";"
                 << std::endl;