@@ -39,12 +39,10 @@ uint64_t* DATA_DST_;
 
 // if more j than b -> subsplit b like it is now
 
 void scan_b(size_t gid, size_t tid) {
-    constexpr size_t RUN_INCREMENT = TC_SCANB / TC_AGGRJ;
-    constexpr size_t THREAD_RATIO = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
-    constexpr size_t SUBCHUNK_COUNT = THREAD_RATIO > 0 ? THREAD_RATIO : 1;
-    constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / SUBCHUNK_COUNT;
-    constexpr size_t SUBCHUNK_SIZE_ELEMENTS = CHUNK_SIZE_ELEMENTS / SUBCHUNK_COUNT;
-    constexpr size_t TC_ACTUAL = TC_AGGRJ >= TC_SCANB ? TC_SCANB : TC_AGGRJ;
+    constexpr size_t VIRT_TID_INCREMENT = TC_SCANB / TC_AGGRJ;
+    constexpr size_t SUBCHUNK_THREAD_RATIO = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
+    constexpr bool CACHE_SUBCHUNKING = SUBCHUNK_THREAD_RATIO > 1;
+    constexpr bool CACHE_OVERCHUNKING = VIRT_TID_INCREMENT > 1;
 
     THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)].clear();
     THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)].resize(1);
@@ -54,15 +52,38 @@ void scan_b(size_t gid, size_t tid) {
     THREAD_TIMING_[SCANB_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
 
     if constexpr (PERFORM_CACHING) {
-        for (size_t i = 0; i < RUN_COUNT; i += RUN_INCREMENT) {
-            const size_t chunk_index = get_chunk_index(gid, i);
-            uint64_t* chunk_ptr = get_chunk<TC_ACTUAL>(DATA_B_, chunk_index, tid);
-            for (size_t j = 0; j < SUBCHUNK_COUNT; j++) {
-                uint64_t* sub_chunk_ptr = &chunk_ptr[j * SUBCHUNK_SIZE_ELEMENTS];
-                CACHE_.Access(reinterpret_cast<uint8_t*>(sub_chunk_ptr), SUBCHUNK_SIZE_B);
+        if constexpr (CACHE_SUBCHUNKING) {
+            constexpr size_t SUBCHUNK_COUNT = SUBCHUNK_THREAD_RATIO > 0 ? SUBCHUNK_THREAD_RATIO : 1;
+            constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / SUBCHUNK_COUNT;
+            constexpr size_t SUBCHUNK_SIZE_ELEMENTS = CHUNK_SIZE_ELEMENTS / SUBCHUNK_COUNT;
+
+            for (size_t i = 0; i < RUN_COUNT; i++) {
+                const size_t chunk_index = get_chunk_index(gid, i);
+                uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
+
+                for (size_t j = 0; j < SUBCHUNK_COUNT; j++) {
+                    uint64_t* sub_chunk_ptr = &chunk_ptr[j * SUBCHUNK_SIZE_ELEMENTS];
+                    CACHE_.Access(reinterpret_cast<uint8_t*>(sub_chunk_ptr), SUBCHUNK_SIZE_B);
+                }
+            }
+        }
+        else if constexpr (CACHE_OVERCHUNKING) {
+            for (size_t tid_virt = tid; tid_virt < TC_AGGRJ; tid_virt += VIRT_TID_INCREMENT) {
+                for (size_t i = 0; i < RUN_COUNT; i++) {
+                    const size_t chunk_index = get_chunk_index(gid, i);
+                    uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid_virt);
+                    CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B);
+                }
+            }
+        }
+        else {
+            for (size_t i = 0; i < RUN_COUNT; i++) {
+                const size_t chunk_index = get_chunk_index(gid, i);
+                uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
+                CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B);
+            }
         }
     }
 
     if constexpr (COMPLEX_QUERY) {
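
For reference, a minimal standalone sketch (not part of the patch) of how the two new compile-time flags dispatch, assuming hypothetical values for TC_SCANB, TC_AGGRJ, and CHUNK_SIZE_B; in the benchmark these are build-time constants. The "if more j than b" comment maps to the CACHE_SUBCHUNKING case: with more aggregation (j) threads than scan-b threads, each scan thread subsplits its chunk; with the opposite ratio, CACHE_OVERCHUNKING has each scan thread cache whole chunks for several virtual aggregation thread ids.

#include <cstddef>
#include <iostream>

// Hypothetical configuration values, chosen here so subchunking is selected.
constexpr std::size_t TC_SCANB = 1;            // scan-b worker threads (assumed)
constexpr std::size_t TC_AGGRJ = 4;            // aggregation worker threads (assumed)
constexpr std::size_t CHUNK_SIZE_B = 1 << 20;  // chunk size in bytes (assumed)

// Same derivation as the patch: integer ratios between the two thread
// pools, with a divide-by-zero guard on TC_SCANB.
constexpr std::size_t VIRT_TID_INCREMENT = TC_SCANB / TC_AGGRJ;
constexpr std::size_t SUBCHUNK_THREAD_RATIO = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
constexpr bool CACHE_SUBCHUNKING = SUBCHUNK_THREAD_RATIO > 1;
constexpr bool CACHE_OVERCHUNKING = VIRT_TID_INCREMENT > 1;

int main() {
    if constexpr (CACHE_SUBCHUNKING) {
        // More aggregation threads than scan threads: split each chunk so
        // every aggregation thread gets a cache entry of its own size.
        constexpr std::size_t SUBCHUNK_COUNT = SUBCHUNK_THREAD_RATIO;
        std::cout << "subchunking: " << SUBCHUNK_COUNT << " sub-chunks of "
                  << (CHUNK_SIZE_B / SUBCHUNK_COUNT) << " B each\n";
    } else if constexpr (CACHE_OVERCHUNKING) {
        // More scan threads than aggregation threads: each scan thread
        // caches whole chunks for several virtual aggregation thread ids.
        std::cout << "overchunking: virtual-tid stride " << VIRT_TID_INCREMENT << "\n";
    } else {
        // Equal thread counts: plain 1:1 caching of whole chunks.
        std::cout << "1:1 chunk caching\n";
    }
    return 0;
}

Because both ratios use integer division, at most one of the two flags is true for any thread-count pair, and both are false when the counts are equal, which falls through to the plain whole-chunk path.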