@@ -20,7 +20,7 @@
 
 #include "BenchmarkHelpers.cpp"
 
-#define MODE_HBM
+#define MODE_PREFETCH
 
 ////////////////////////////////
 /// BENCHMARK SETUP
|
|
@@ -30,11 +30,11 @@ constexpr uint32_t WARMUP_ITERATION_COUNT = 5;
 constexpr uint32_t ITERATION_COUNT = 5;
 
 #ifdef MODE_PREFETCH
-constexpr size_t CHUNK_SIZE_B = 128_MiB;
-constexpr uint32_t GROUP_COUNT = 32;
+constexpr uint32_t GROUP_COUNT = 16;
+constexpr size_t CHUNK_SIZE_B = WL_SIZE_B / GROUP_COUNT;
 constexpr uint32_t TC_SCANA = 1;
-constexpr uint32_t TC_SCANB = 2;
-constexpr uint32_t TC_AGGRJ = 2;
+constexpr uint32_t TC_SCANB = 1;
+constexpr uint32_t TC_AGGRJ = 4;
 constexpr bool PERFORM_CACHING = true;
 constexpr bool DATA_IN_HBM = false;
 constexpr char MODE_STRING[] = "prefetch";
@@ -157,7 +157,7 @@ void process_timings(
 {
     uint64_t aggrj_rc = 0;
 
-    for (const auto& e : THREAD_TIMING_[SCANB_TIMING_INDEX]) {
+    for (const auto& e : THREAD_TIMING_[AGGRJ_TIMING_INDEX]) {
         for (const auto& m : e) {
             *aggrj_wait += std::chrono::duration_cast<std::chrono::nanoseconds>(m[TIME_STAMP_WAIT] - m[TIME_STAMP_BEGIN]).count();
             *aggrj_run += std::chrono::duration_cast<std::chrono::nanoseconds>(m[TIME_STAMP_END] - m[TIME_STAMP_WAIT]).count();
@@ -174,44 +174,42 @@ void process_timings(
 void scan_b(size_t gid, size_t tid) {
     constexpr size_t split = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
 
     const size_t start = tid * split;
     const size_t end = start + split;
 
     THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid].clear();
-    THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid].resize(split);
+    THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid].resize(0);
 
     LAUNCH_.wait();
 
+    THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
+
     if constexpr (PERFORM_CACHING) {
         for (size_t i = start; i < end; i++) {
-            THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid][i][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
-
             const size_t chunk_index = get_chunk_index(gid, 0);
             uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, i);
 
-            const auto data = CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B / TC_AGGRJ);
-
-            THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid][i][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
-
-            BARRIERS_[gid]->arrive_and_wait();
-
-            data->WaitOnCompletion();
-
-            THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid][i][TIME_STAMP_END] = std::chrono::steady_clock::now();
+            CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B / TC_AGGRJ);
         }
     }
 
+    THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
+    THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
+
     BARRIERS_[gid]->arrive_and_drop();
 }
 
 void scan_a(size_t gid, size_t tid) {
     THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid].clear();
-    THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid].resize(RUN_COUNT);
+    THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid].resize(0);
 
     LAUNCH_.wait();
 
+    THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
+
     for (size_t i = 0; i < RUN_COUNT; i++) {
-        THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid][i][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
-
         const size_t chunk_index = get_chunk_index(gid, i);
         uint64_t* chunk_ptr = get_chunk<TC_SCANA>(DATA_A_, chunk_index, tid);
@@ -219,31 +217,29 @@ void scan_a(size_t gid, size_t tid) {
         filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_A, CHUNK_SIZE_B / TC_SCANA);
 
-        THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid][i][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
-
-        BARRIERS_[gid]->arrive_and_wait();
-
-        THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid][i][TIME_STAMP_END] = std::chrono::steady_clock::now();
     }
 
+    THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
+    THREAD_TIMING_[SCANA_TIMING_INDEX][tid * gid][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
+
     BARRIERS_[gid]->arrive_and_drop();
 }
 
 void aggr_j(size_t gid, size_t tid) {
     THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid].clear();
-    THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid].resize(RUN_COUNT);
+    THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid].resize(1);
 
     LAUNCH_.wait();
 
     __m512i aggregator = aggregation::OP::zero();
 
-    for (size_t i = 0; i < RUN_COUNT; i++) {
-        THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][i][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
+    THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
 
-        BARRIERS_[gid]->arrive_and_wait();
+    BARRIERS_[gid]->arrive_and_wait();
 
-        THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][i][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
+    THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
 
+    for (size_t i = 0; i < RUN_COUNT; i++) {
         const size_t chunk_index = get_chunk_index(gid, i);
         uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
         uint16_t* mask_ptr = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
@@ -266,10 +262,10 @@ void aggr_j(size_t gid, size_t tid) {
         uint64_t tmp = _mm512_reduce_add_epi64(aggregator);
         aggregator = aggregation::apply_masked(aggregator, data_ptr, mask_ptr, CHUNK_SIZE_B / TC_AGGRJ);
-
-        THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][i][TIME_STAMP_END] = std::chrono::steady_clock::now();
     }
 
+    THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
+
     BARRIERS_[gid]->arrive_and_drop();
 
     aggregation::happly(DATA_DST_ + (tid * GROUP_COUNT + gid), aggregator);
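For reference, the timing scheme this patch moves to keeps a single [BEGIN, WAIT, END] record per task thread instead of one record per iteration, and process_timings() splits each record into wait time (WAIT - BEGIN) and run time (END - WAIT), accumulated in nanoseconds. Below is a minimal self-contained sketch of that decomposition; the names TimeStamp, Record and accumulate are illustrative stand-ins, not identifiers from the benchmark.

#include <array>
#include <chrono>
#include <cstdint>
#include <iostream>

// Illustrative stand-ins for the benchmark's TIME_STAMP_* indices.
enum TimeStamp : size_t { TS_BEGIN = 0, TS_WAIT = 1, TS_END = 2 };

using TimePoint = std::chrono::steady_clock::time_point;
using Record = std::array<TimePoint, 3>;  // one [BEGIN, WAIT, END] triple

// Mirrors the duration_cast logic in process_timings():
// wait = WAIT - BEGIN, run = END - WAIT, both in nanoseconds.
void accumulate(const Record& m, uint64_t* wait_ns, uint64_t* run_ns) {
    using std::chrono::duration_cast;
    using std::chrono::nanoseconds;
    *wait_ns += duration_cast<nanoseconds>(m[TS_WAIT] - m[TS_BEGIN]).count();
    *run_ns  += duration_cast<nanoseconds>(m[TS_END] - m[TS_WAIT]).count();
}

int main() {
    Record m;
    m[TS_BEGIN] = std::chrono::steady_clock::now();  // thread released by launch latch
    m[TS_WAIT]  = std::chrono::steady_clock::now();  // barrier/cache wait finished
    m[TS_END]   = std::chrono::steady_clock::now();  // processing done

    uint64_t wait_ns = 0, run_ns = 0;
    accumulate(m, &wait_ns, &run_ns);
    std::cout << "wait: " << wait_ns << " ns, run: " << run_ns << " ns\n";
}

With one record per thread, the wait/run split measures the whole run rather than individual chunks, which is why the per-iteration [i] indices above collapse to [0].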