@@ -5,6 +5,7 @@
 #include <fstream>
 #include <future>
 #include <array>
+#include <atomic>
 
 #include "const.h"
 #include "filter.h"
@@ -21,6 +22,7 @@ using aggregation = Aggregation<uint64_t, Sum, load_mode::Stream>;
 
 dsacache::Cache CACHE_;
+std::array<std::atomic<int32_t>, GROUP_COUNT> PREFETCHED_CHUNKS_;
 
 std::vector<std::barrier<NopStruct>*> BARRIERS_;
 std::shared_future<void> LAUNCH_;
 
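Note: the new per-group PREFETCHED_CHUNKS_ slots double as counters and as futex-style signals, so a caching thread can publish a finished chunk with an increment followed by notify_one(). The matching consumer side is not part of this patch; a minimal sketch of the intended C++20 handshake, with the consumer being an assumption:

    std::atomic<int32_t>& counter = PREFETCHED_CHUNKS_[gid];

    // producer (as in caching() below): publish one chunk, wake one waiter
    counter++;
    counter.notify_one();

    // consumer (assumed): block while no new chunk has been published
    int32_t seen = counter.load();
    counter.wait(seen); // returns once the value differs from 'seen'
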
@@ -29,24 +31,72 @@ uint64_t* DATA_B_;
 uint16_t* MASK_A_;
 uint64_t* DATA_DST_;
 
-void scan_b(size_t gid, size_t tid) {
-    THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)].clear();
-    THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)].resize(1);
-
-    // if more b than j -> perform b normal, subsplit j
-    // if more j than b -> subsplit b like it is now
-
-    LAUNCH_.wait();
-
-    THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
-
-    if constexpr (PERFORM_CACHING) {
-        for (size_t i = 0; i < RUN_COUNT; i++) {
-            const size_t chunk_index = get_chunk_index(gid, i);
-            uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
-
-            CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), SUBCHUNK_SIZE_B_AGGRJ);
-        }
-    }
+template<size_t TC_CACHING>
+void caching(size_t gid, size_t tid) {
+    constexpr size_t VIRT_TID_INCREMENT = TC_CACHING / TC_AGGRJ;
+    constexpr size_t SUBCHUNK_THREAD_RATIO = TC_AGGRJ / (TC_CACHING == 0 ? 1 : TC_CACHING);
+    constexpr bool CACHE_SUBCHUNKING = SUBCHUNK_THREAD_RATIO > 1;
+    constexpr bool CACHE_OVERCHUNKING = VIRT_TID_INCREMENT > 1;
+
+    if constexpr (PERFORM_CACHING) {
+        static_assert(TC_AGGRJ == TC_SCANB);
+
+        if constexpr (CACHE_SUBCHUNKING) {
+            constexpr size_t SUBCHUNK_COUNT = SUBCHUNK_THREAD_RATIO > 0 ? SUBCHUNK_THREAD_RATIO : 1;
+            constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / SUBCHUNK_COUNT;
+            constexpr size_t SUBCHUNK_SIZE_ELEMENTS = CHUNK_SIZE_ELEMENTS / SUBCHUNK_COUNT;
+
+            for (size_t i = 0; i < RUN_COUNT; i++) {
+                const size_t chunk_index = get_chunk_index(gid, i);
+                uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
+
+                for (size_t j = 0; j < SUBCHUNK_COUNT; j++) {
+                    uint64_t* sub_chunk_ptr = &chunk_ptr[j * SUBCHUNK_SIZE_ELEMENTS];
+
+                    CACHE_.Access(reinterpret_cast<uint8_t*>(sub_chunk_ptr), SUBCHUNK_SIZE_B);
+
+                    PREFETCHED_CHUNKS_[gid]++;
+                    PREFETCHED_CHUNKS_[gid].notify_one();
+                }
+            }
+        }
+        else if constexpr (CACHE_OVERCHUNKING) {
+            for (size_t tid_virt = tid; tid_virt < TC_AGGRJ; tid_virt += VIRT_TID_INCREMENT) {
+                for (size_t i = 0; i < RUN_COUNT; i++) {
+                    const size_t chunk_index = get_chunk_index(gid, i);
+                    uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid_virt);
+
+                    CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B);
+
+                    PREFETCHED_CHUNKS_[gid]++;
+                    PREFETCHED_CHUNKS_[gid].notify_one();
+                }
+            }
+        }
+        else {
+            for (size_t i = 0; i < RUN_COUNT; i++) {
+                const size_t chunk_index = get_chunk_index(gid, i);
+                uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
+
+                CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B);
+
+                PREFETCHED_CHUNKS_[gid]++;
+                PREFETCHED_CHUNKS_[gid].notify_one();
+            }
+        }
+    }
+}
+
+void scan_b(size_t gid, size_t tid) {
+    THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)].clear();
+    THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)].resize(1);
+
+    LAUNCH_.wait();
+
+    THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
+
+    caching<TC_SCANB>(gid, tid);
+
 
     THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
     THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
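Note: the three branches in caching() are selected entirely at compile time from the ratio between caching threads (TC_CACHING) and aggregation threads (TC_AGGRJ). A worked example with illustrative thread counts, not taken from this patch:

    // illustrative values only; the real TC_* constants come from const.h
    constexpr size_t TC_CACHING_X = 1, TC_AGGRJ_X = 4;

    constexpr size_t VIRT_TID_INCREMENT = TC_CACHING_X / TC_AGGRJ_X;   // 1/4 -> 0
    constexpr size_t SUBCHUNK_THREAD_RATIO =
        TC_AGGRJ_X / (TC_CACHING_X == 0 ? 1 : TC_CACHING_X);           // 4/1 -> 4

    static_assert(SUBCHUNK_THREAD_RATIO > 1);   // CACHE_SUBCHUNKING: split each chunk in four
    static_assert(!(VIRT_TID_INCREMENT > 1));   // CACHE_OVERCHUNKING not taken

With the ratio inverted (say 8 caching threads over 2 aggregation threads), VIRT_TID_INCREMENT becomes 4 and the overchunking branch runs instead, iterating virtual tids so that the cached layout still matches what get_chunk<TC_AGGRJ> computes on the consumer side.
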
@@ -65,33 +115,36 @@ void scan_a(size_t gid, size_t tid) {
         uint64_t* chunk_ptr = get_chunk<TC_SCANA>(DATA_A_, chunk_index, tid);
         uint16_t* mask_ptr = get_mask<TC_SCANA>(MASK_A_, chunk_index, tid);
 
-        filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_A, SUBCHUNK_SIZE_B_SCANA);
+        filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_A, CHUNK_SIZE_B / TC_SCANA);
+
+        BARRIERS_[gid]->arrive_and_wait();
     }
 
     THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
-    THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
 
     BARRIERS_[gid]->arrive_and_drop();
+
+    THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
 }
 
 void aggr_j(size_t gid, size_t tid) {
+    constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / TC_AGGRJ;
+
     CACHE_HITS_[UniqueIndex(gid,tid)] = 0;
 
     THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)].clear();
-    THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)].resize(1);
+    THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)].resize(RUN_COUNT);
 
     __m512i aggregator = aggregation::OP::zero();
 
     LAUNCH_.wait();
 
-    THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
-
-    BARRIERS_[gid]->arrive_and_drop();
-
-    THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
-
-    for (size_t i = 0; i < RUN_COUNT; i++) {
+    for (size_t i = 0; i < RUN_COUNT; i++) {
+        THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
+
+        BARRIERS_[gid]->arrive_and_wait();
+
+        THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
+
         const size_t chunk_index = get_chunk_index(gid, i);
         uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
         uint16_t* mask_ptr_a = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
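Note: scan_a and aggr_j now rendezvous once per chunk instead of once per run. arrive_and_wait() blocks until every expected participant has arrived; arrive_and_drop() arrives one last time and lowers the participant count expected in subsequent phases, so a finished producer can exit without deadlocking the consumers. These are the standard C++20 std::barrier semantics; a minimal self-contained sketch:

    #include <barrier>
    #include <thread>

    std::barrier sync(2); // one producer, one consumer

    void producer() {
        // ... produce one chunk ...
        sync.arrive_and_wait();  // phase completes together with the consumer
        sync.arrive_and_drop();  // leave: later phases expect one participant fewer
    }

    void consumer() {
        sync.arrive_and_wait();  // wakes once the chunk is ready
    }
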
@@ -100,9 +153,8 @@ void aggr_j(size_t gid, size_t tid) {
         uint64_t* data_ptr;
 
         if constexpr (PERFORM_CACHING) {
-            data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), SUBCHUNK_SIZE_B_AGGRJ, dsacache::FLAG_ACCESS_WEAK);
-
+            data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), SUBCHUNK_SIZE_B, dsacache::FLAG_ACCESS_WEAK);
             data->WaitOnCompletion();
             data_ptr = reinterpret_cast<uint64_t*>(data->GetDataLocation());
 
             if (data_ptr == nullptr) {
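Note: with FLAG_ACCESS_WEAK the cache is asked not to schedule a new copy but to hand back data only if it is already resident, so GetDataLocation() may legitimately return nullptr. The nullptr branch itself lies outside this hunk; a sketch of the presumed fallback, where the source-pointer fallback and the hit bookkeeping are assumptions:

    data->WaitOnCompletion();
    data_ptr = reinterpret_cast<uint64_t*>(data->GetDataLocation());

    if (data_ptr == nullptr) {
        data_ptr = chunk_ptr;                    // assumed: fall back to the uncached source buffer
    }
    else {
        CACHE_HITS_[UniqueIndex(gid,tid)]++;     // assumed: count as a cache hit
    }
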
@@ -120,13 +172,14 @@ void aggr_j(size_t gid, size_t tid) {
         }
 
         uint64_t tmp = _mm512_reduce_add_epi64(aggregator);
 
-        aggregator = aggregation::apply_masked(aggregator, data_ptr, mask_ptr_a, SUBCHUNK_SIZE_B_AGGRJ);
+        aggregator = aggregation::apply_masked(aggregator, data_ptr, mask_ptr_a, SUBCHUNK_SIZE_B);
+        THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_END] = std::chrono::steady_clock::now();
     }
 
-    THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
-
-    aggregation::happly(&DATA_DST_[UniqueIndex(gid,tid)], aggregator);
+    aggregation::happly(&DATA_DST_[UniqueIndex(gid,tid)], aggregator);
+
+    BARRIERS_[gid]->arrive_and_drop();
 }
 
 int main() {
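Note: with resize(RUN_COUNT) and the [i] indexing above, aggr_j now records one BEGIN/WAIT/END triple per run instead of a single record per thread, so barrier wait time can be attributed to individual chunks. Deriving the per-chunk components from one record, using the TIME_STAMP_* indices as already defined in this file:

    const auto& rec = THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i];
    const auto barrier_wait     = rec[TIME_STAMP_WAIT] - rec[TIME_STAMP_BEGIN]; // blocked on scan_a
    const auto aggregation_time = rec[TIME_STAMP_END]  - rec[TIME_STAMP_WAIT];  // actual work
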
@@ -159,7 +212,6 @@ int main() {
     // which is configured for xeonmax with smart assignment
     uint64_t cache_flags = 0;
     cache_flags |= dsacache::FLAG_WAIT_WEAK;
-    cache_flags |= dsacache::FLAG_HANDLE_PF;
     CACHE_.SetFlags(cache_flags);
     CACHE_.Init(CachePlacementPolicy, CopyMethodPolicy);
 }
@@ -200,7 +252,7 @@ int main() {
     for(std::thread& t : agg_pool) { t.join(); }
 
     uint64_t result_actual = 0;
-    aggregation::apply(&result_actual, DATA_DST_, sizeof(uint64_t) * TC_AGGRJ * GROUP_COUNT);
+    Aggregation<uint64_t, Sum, load_mode::Aligned>::apply(&result_actual, DATA_DST_, sizeof(uint64_t) * TC_AGGRJ * GROUP_COUNT);
 
     const auto time_end = std::chrono::steady_clock::now();
 
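Note: the verification sum now instantiates Aggregation with load_mode::Aligned rather than reusing the file-level Stream alias. A plausible reading, not stated in the patch: DATA_DST_ is a small, cache-resident array holding one partial sum per aggregation thread, where non-temporal streaming loads buy nothing; only the bulk scan path should bypass the CPU caches. Side by side, with the second alias being hypothetical naming:

    using aggregation   = Aggregation<uint64_t, Sum, load_mode::Stream>;  // bulk scan (existing)
    using result_reduce = Aggregation<uint64_t, Sum, load_mode::Aligned>; // hypothetical alias

    result_reduce::apply(&result_actual, DATA_DST_, sizeof(uint64_t) * TC_AGGRJ * GROUP_COUNT);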