@@ -40,41 +40,39 @@ void caching(size_t gid, size_t tid) {
    constexpr bool CACHE_SUBCHUNKING = SUBCHUNK_THREAD_RATIO > 1;
    constexpr bool CACHE_OVERCHUNKING = VIRT_TID_INCREMENT > 1;

    if constexpr (PERFORM_CACHING) {
        if constexpr (CACHE_SUBCHUNKING) {
            constexpr size_t SUBCHUNK_COUNT = SUBCHUNK_THREAD_RATIO > 0 ? SUBCHUNK_THREAD_RATIO : 1;
            constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / SUBCHUNK_COUNT;
            constexpr size_t SUBCHUNK_SIZE_ELEMENTS = CHUNK_SIZE_ELEMENTS / SUBCHUNK_COUNT;

            for (size_t i = 0; i < RUN_COUNT; i++) {
                const size_t chunk_index = get_chunk_index(gid, i);
                uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);

                for (size_t j = 0; j < SUBCHUNK_COUNT; j++) {
                    uint64_t* sub_chunk_ptr = &chunk_ptr[j * SUBCHUNK_SIZE_ELEMENTS];
                    CACHE_.Access(reinterpret_cast<uint8_t*>(sub_chunk_ptr), SUBCHUNK_SIZE_B);
                }
            }
        }
-       else {
-       }
        else if constexpr (CACHE_OVERCHUNKING) {
            for (size_t tid_virt = tid; tid_virt < TC_AGGRJ; tid_virt += VIRT_TID_INCREMENT) {
                for (size_t i = 0; i < RUN_COUNT; i++) {
                    const size_t chunk_index = get_chunk_index(gid, i);
-                   uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
+                   uint64_t *chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid_virt);

-                   CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B);
+                   CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), CHUNK_SIZE_B);
                }
            }
        }
        else {
            for (size_t i = 0; i < RUN_COUNT; i++) {
                const size_t chunk_index = get_chunk_index(gid, i);
                uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);

                CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B);
            }
        }
    }
}

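The sub-chunking branch above splits each chunk into SUBCHUNK_COUNT equal slices and issues one CACHE_.Access per slice instead of one per chunk, presumably so that the more numerous consumer threads each find their slice already cached. A minimal standalone sketch of that arithmetic, with assumed sizes (4 MiB chunks, ratio 4) and a warm_range() stub in place of CACHE_.Access():

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Assumed example sizes; the real values come from the benchmark configuration.
constexpr size_t CHUNK_SIZE_B = 4 << 20;                             // 4 MiB per chunk
constexpr size_t CHUNK_SIZE_ELEMENTS = CHUNK_SIZE_B / sizeof(uint64_t);
constexpr size_t SUBCHUNK_THREAD_RATIO = 4;                          // e.g. 4 consumers per caching thread

// Stand-in for CACHE_.Access(ptr, bytes): just report what would be warmed.
void warm_range(size_t offset_b, size_t bytes) {
    std::printf("cache %4zu KiB at offset %4zu KiB\n", bytes >> 10, offset_b >> 10);
}

int main() {
    std::vector<uint64_t> chunk(CHUNK_SIZE_ELEMENTS, 0);

    // Same derivation as the sub-chunking branch of caching().
    constexpr size_t SUBCHUNK_COUNT = SUBCHUNK_THREAD_RATIO > 0 ? SUBCHUNK_THREAD_RATIO : 1;
    constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / SUBCHUNK_COUNT;
    constexpr size_t SUBCHUNK_SIZE_ELEMENTS = CHUNK_SIZE_ELEMENTS / SUBCHUNK_COUNT;

    // One cache request per sub-chunk instead of one request for the whole chunk.
    for (size_t j = 0; j < SUBCHUNK_COUNT; j++) {
        const uint64_t* sub_chunk_ptr = &chunk[j * SUBCHUNK_SIZE_ELEMENTS];
        (void)sub_chunk_ptr;   // the benchmark would hand this pointer to CACHE_.Access()
        warm_range(j * SUBCHUNK_SIZE_B, SUBCHUNK_SIZE_B);
    }
    return 0;
}
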
void scan_b(size_t gid, size_t tid) {
@@ -85,7 +83,7 @@ void scan_b(size_t gid, size_t tid) {

    THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();

-   if constexpr (!PERFORM_CACHING_IN_AGGREGATION) {
+   if constexpr (PERFORM_CACHING && !PERFORM_CACHING_IN_AGGREGATION) {
        caching<TC_SCANB>(gid, tid);
    }

@@ -135,6 +133,10 @@ void aggr_j(size_t gid, size_t tid) {

    LAUNCH_.wait();

+   if constexpr (PERFORM_CACHING && PERFORM_CACHING_IN_AGGREGATION) {
+       caching<TC_AGGRJ>(gid, tid);
+   }
+
    for (size_t i = 0; i < RUN_COUNT; i++) {
        THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
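
Taken together, the scan_b and aggr_j hunks make the placement of the cache warm-up a compile-time choice: with PERFORM_CACHING_IN_AGGREGATION set, the caching() call moves out of scan_b and runs in aggr_j after LAUNCH_.wait(). A minimal sketch of that guard structure, using placeholder task functions and flags fixed for illustration:

#include <cstdio>

// Flags fixed for illustration; the benchmark sets them in its configuration.
constexpr bool PERFORM_CACHING = true;
constexpr bool PERFORM_CACHING_IN_AGGREGATION = true;

void do_caching(const char* who) { std::printf("cache warm-up performed by %s\n", who); }

// Mirrors the guard added to scan_b(): warm the cache here only when the
// aggregation side is not responsible for it.
void scan_task() {
    if constexpr (PERFORM_CACHING && !PERFORM_CACHING_IN_AGGREGATION) {
        do_caching("scan");
    }
    std::printf("scan work\n");
}

// Mirrors the block added to aggr_j(): optionally warm the cache right
// before the aggregation work starts.
void aggregation_task() {
    if constexpr (PERFORM_CACHING && PERFORM_CACHING_IN_AGGREGATION) {
        do_caching("aggregation");
    }
    std::printf("aggregation work\n");
}

int main() {
    scan_task();
    aggregation_task();
    return 0;
}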