@@ -29,6 +29,8 @@ constexpr size_t CHUNK_COUNT = WL_SIZE_B / CHUNK_SIZE_B;
 constexpr size_t CHUNK_SIZE_ELEMENTS = CHUNK_SIZE_B / sizeof(uint64_t);
 constexpr size_t RUN_COUNT = CHUNK_COUNT / GROUP_COUNT;
 
+static_assert(TC_AGGRJ % (TC_SCANB > 0 ? TC_SCANB : TC_AGGRJ) == 0);
+static_assert(TC_AGGRJ >= TC_SCANB);
 static_assert(RUN_COUNT > 0);
 static_assert(WL_SIZE_B % 16 == 0);
 static_assert(CHUNK_SIZE_B % 16 == 0);
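
The two added static_asserts pin down the relationship between the scan-b and aggregation thread counts: TC_AGGRJ must be at least TC_SCANB and an exact multiple of it (the ternary only guards against division by zero when TC_SCANB is 0). This is what lets scan_b below split each chunk into a whole number of sub-chunks, one group per aggregation thread. A minimal, self-contained sketch of that arithmetic, using hypothetical values for TC_SCANB and TC_AGGRJ (the real values are build-configuration constants not shown in this diff):

#include <cstddef>
#include <cstdint>

// Hypothetical example configuration; the real TC_* values are compile-time
// constants defined elsewhere in the benchmark.
constexpr uint32_t TC_SCANB = 2;
constexpr uint32_t TC_AGGRJ = 8;

// Same constraints the patch adds: TC_AGGRJ is a positive multiple of TC_SCANB.
static_assert(TC_AGGRJ % (TC_SCANB > 0 ? TC_SCANB : TC_AGGRJ) == 0);
static_assert(TC_AGGRJ >= TC_SCANB);

// Consequence used later in scan_b: each scan-b thread covers a whole number
// of aggregation sub-chunks (here 8 / 2 = 4).
constexpr size_t SUBCHUNK_COUNT = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
static_assert(SUBCHUNK_COUNT * TC_SCANB == TC_AGGRJ);

int main() { return 0; }

With TC_SCANB = 2 and TC_AGGRJ = 8, SUBCHUNK_COUNT evaluates to 4, matching the j-loop bound introduced in scan_b further down.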
|
@@ -145,19 +147,8 @@ void process_timings(
 
 void scan_b(size_t gid, size_t tid) {
     constexpr bool SUBSPLIT_SCANB = TC_AGGRJ > TC_SCANB;
-    constexpr size_t SUBCHUNK_SIZE_ELEMENTS_SCANB = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
-    constexpr uint32_t TC_SUBSPLIT_SCANB = SUBSPLIT_SCANB ? TC_SCANB : TC_AGGRJ;
-
-    size_t start, end;
-
-    if constexpr (SUBSPLIT_SCANB) {
-        start = tid * SUBCHUNK_SIZE_ELEMENTS_SCANB;
-        end = start + SUBCHUNK_SIZE_ELEMENTS_SCANB;
-    }
-    else {
-        start = 0;
-        end = RUN_COUNT;
-    }
+    constexpr size_t SUBCHUNK_COUNT = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
+    constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / SUBCHUNK_COUNT;
 
     THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid].clear();
     THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid].resize(1);
@@ -167,21 +158,24 @@ void scan_b(size_t gid, size_t tid) {
     THREAD_TIMING_[SCANB_TIMING_INDEX][tid * gid][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
 
     if constexpr (PERFORM_CACHING) {
-        for (size_t i = start; i < end; i++) {
+        for (size_t i = 0; i < RUN_COUNT; i++) {
             const size_t chunk_index = get_chunk_index(gid, 0);
 
-            uint64_t* chunk_ptr = get_chunk<TC_SUBSPLIT_SCANB>(DATA_B_, chunk_index, i);
-            CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B / TC_SUBSPLIT_SCANB);
+            uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, i);
+            for (size_t j = 0; j < SUBCHUNK_COUNT; j++) {
+                uint64_t* sub_chunk_ptr = &chunk_ptr[j * CHUNK_SIZE_ELEMENTS];
+                CACHE_.Access(reinterpret_cast<uint8_t*>(sub_chunk_ptr), SUBCHUNK_SIZE_B);
+            }
         }
     }
 
     if constexpr (COMPLEX_QUERY) {
-        for (size_t i = start; i < end; i++) {
+        for (size_t i = 0; i < RUN_COUNT; i++) {
             const size_t chunk_index = get_chunk_index(gid, 0);
 
-            uint64_t* chunk_ptr = get_chunk<TC_SUBSPLIT_SCANB>(DATA_B_, chunk_index, i);
-            uint16_t* mask_ptr = get_mask<TC_SUBSPLIT_SCANB>(MASK_B_, chunk_index, i);
-            filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_B, CHUNK_SIZE_B / TC_SUBSPLIT_SCANB);
+            uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, i);
+            uint16_t* mask_ptr = get_mask<TC_SCANB>(MASK_B_, chunk_index, i);
+            filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_B, CHUNK_SIZE_B / TC_SCANB);
         }
     }
 
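
The reworked caching loop no longer hands one contiguous CHUNK_SIZE_B / TC_SUBSPLIT_SCANB slice to the cache; it walks SUBCHUNK_COUNT sub-chunks of SUBCHUNK_SIZE_B bytes each, so the staged regions line up with what the aggregation threads will later request. The sketch below illustrates only the partitioning arithmetic, with a locally allocated chunk and a placeholder touch() standing in for CACHE_.Access; it strides by sub-chunk elements within that single buffer, whereas the patch itself offsets by CHUNK_SIZE_ELEMENTS into the global DATA_B_ column via get_chunk. All sizes are hypothetical example values.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical example sizes; the benchmark derives its values from the build configuration.
constexpr size_t CHUNK_SIZE_B = 16 * 1024;
constexpr size_t CHUNK_SIZE_ELEMENTS = CHUNK_SIZE_B / sizeof(uint64_t);
constexpr uint32_t TC_SCANB = 2;
constexpr uint32_t TC_AGGRJ = 8;
constexpr size_t SUBCHUNK_COUNT = TC_AGGRJ / (TC_SCANB == 0 ? 1 : TC_SCANB);
constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / SUBCHUNK_COUNT;
constexpr size_t SUBCHUNK_SIZE_ELEMENTS = SUBCHUNK_SIZE_B / sizeof(uint64_t);

// Placeholder for CACHE_.Access(ptr, size): simply reads every byte of the range.
uint64_t touch(const uint8_t* ptr, size_t size_b) {
    uint64_t sum = 0;
    for (size_t i = 0; i < size_b; i++) sum += ptr[i];
    return sum;
}

int main() {
    // One chunk worth of data, split into SUBCHUNK_COUNT equal sub-chunks so
    // that each aggregation thread later finds its slice staged separately.
    std::vector<uint64_t> chunk(CHUNK_SIZE_ELEMENTS, 1);

    uint64_t total = 0;
    for (size_t j = 0; j < SUBCHUNK_COUNT; j++) {
        uint64_t* sub_chunk_ptr = &chunk[j * SUBCHUNK_SIZE_ELEMENTS];
        total += touch(reinterpret_cast<const uint8_t*>(sub_chunk_ptr), SUBCHUNK_SIZE_B);
    }

    std::printf("touched %zu sub-chunks of %zu bytes each, checksum %llu\n",
                SUBCHUNK_COUNT, SUBCHUNK_SIZE_B, static_cast<unsigned long long>(total));
    return 0;
}
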
@@ -214,21 +208,6 @@ void scan_a(size_t gid, size_t tid) {
 }
 
 void aggr_j(size_t gid, size_t tid) {
-    constexpr bool SUBSPLIT_AGGRJ = TC_SCANB > TC_AGGRJ;
-    constexpr size_t SUBCHUNK_SIZE_ELEMENTS_AGGRJ = TC_SCANB / TC_AGGRJ;
-    constexpr uint32_t TC_SUBSPLIT_AGGRJ = SUBSPLIT_AGGRJ ? TC_AGGRJ : TC_SCANB;
-
-    size_t start, end;
-
-    if constexpr (SUBSPLIT_AGGRJ) {
-        start = tid * SUBCHUNK_SIZE_ELEMENTS_AGGRJ;
-        end = start + SUBCHUNK_SIZE_ELEMENTS_AGGRJ;
-    }
-    else {
-        start = 0;
-        end = RUN_COUNT;
-    }
-
     CACHE_HITS_[gid * tid] = 0;
 
     THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid].clear();
@@ -244,17 +223,17 @@ void aggr_j(size_t gid, size_t tid) {
     THREAD_TIMING_[AGGRJ_TIMING_INDEX][tid * gid][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
 
-    for (size_t i = start; i < end; i++) {
+    for (size_t i = 0; i < RUN_COUNT; i++) {
         const size_t chunk_index = get_chunk_index(gid, i);
 
-        uint64_t* chunk_ptr = get_chunk<TC_SUBSPLIT_AGGRJ>(DATA_B_, chunk_index, tid);
-        uint16_t* mask_ptr_a = get_mask<TC_SUBSPLIT_AGGRJ>(MASK_A_, chunk_index, tid);
-        uint16_t* mask_ptr_b = get_mask<TC_SUBSPLIT_AGGRJ>(MASK_B_, chunk_index, tid);
+        uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
+        uint16_t* mask_ptr_a = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
+        uint16_t* mask_ptr_b = get_mask<TC_AGGRJ>(MASK_B_, chunk_index, tid);
 
         std::unique_ptr<dsacache::CacheData> data;
         uint64_t* data_ptr;
 
         if constexpr (PERFORM_CACHING) {
-            data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), CHUNK_SIZE_B / TC_SUBSPLIT_AGGRJ);
+            data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), CHUNK_SIZE_B / TC_AGGRJ);
 
             data->WaitOnCompletion(dsacache::WAIT_WEAK);
 
             data_ptr = reinterpret_cast<uint64_t*>(data->GetDataLocation());
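
get_chunk<TC>() and get_mask<TC>() are used throughout but not defined in this diff. The following is only a guess at the shape of such an accessor, to clarify what swapping the template argument from TC_SUBSPLIT_AGGRJ to TC_AGGRJ means: the chunk is divided among TC threads and thread tid receives the tid-th slice, so the divisor in CHUNK_SIZE_B / TC_AGGRJ is the per-thread slice size. Names, layout, and the slice formula are assumptions, not the benchmark's actual implementation.

#include <cstddef>
#include <cstdint>

// Assumed global sizes, mirroring the constants used in the patch.
constexpr size_t CHUNK_SIZE_B = 16 * 1024;
constexpr size_t CHUNK_SIZE_ELEMENTS = CHUNK_SIZE_B / sizeof(uint64_t);

// Hypothetical accessor: not the benchmark's real get_chunk, only an
// illustration of what the template parameter TC does. The chunk with index
// chunk_index is split into TC equal slices and thread tid gets slice tid.
template <uint32_t TC>
uint64_t* get_chunk(uint64_t* data, size_t chunk_index, size_t tid) {
    const size_t slice_elements = CHUNK_SIZE_ELEMENTS / TC;
    return data + chunk_index * CHUNK_SIZE_ELEMENTS + tid * slice_elements;
}

int main() {
    static uint64_t column[4 * CHUNK_SIZE_ELEMENTS] = {};

    // Thread 3 of 8 working on chunk 2 sees a slice of CHUNK_SIZE_B / 8 bytes.
    uint64_t* slice = get_chunk<8>(column, 2, 3);
    (void)slice;
    return 0;
}
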
@@ -272,10 +251,10 @@ void aggr_j(size_t gid, size_t tid) {
         uint64_t tmp = _mm512_reduce_add_epi64(aggregator);
 
         if constexpr (COMPLEX_QUERY) {
-            aggregator = aggregation::apply_masked(aggregator, chunk_ptr, mask_ptr_a, mask_ptr_b, CHUNK_SIZE_B / TC_SUBSPLIT_AGGRJ);
+            aggregator = aggregation::apply_masked(aggregator, chunk_ptr, mask_ptr_a, mask_ptr_b, CHUNK_SIZE_B / TC_AGGRJ);
         }
         else {
-            aggregator = aggregation::apply_masked(aggregator, data_ptr, mask_ptr_a, CHUNK_SIZE_B / TC_SUBSPLIT_AGGRJ);
+            aggregator = aggregation::apply_masked(aggregator, data_ptr, mask_ptr_a, CHUNK_SIZE_B / TC_AGGRJ);
         }
     }
 
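aggregation::apply_masked and the _mm512_reduce_add_epi64 reduction are referenced but not defined in this diff. As a rough, self-contained illustration of the masked-sum pattern they imply (assuming one mask bit per 64-bit element and an element count divisible by 8, which may not match the benchmark's actual mask layout):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <immintrin.h>

// Requires AVX-512F (compile e.g. with -mavx512f). Sums the elements of
// `data` whose corresponding mask bit is set, 8 elements per iteration.
// This is an assumed stand-in for aggregation::apply_masked, not its source.
uint64_t masked_sum(const uint64_t* data, const uint8_t* mask_bits, size_t element_count) {
    __m512i aggregator = _mm512_setzero_si512();

    for (size_t i = 0; i < element_count; i += 8) {
        const __mmask8 k = mask_bits[i / 8];
        // Masked load: lanes with a cleared bit become zero and drop out of the sum.
        const __m512i values = _mm512_maskz_loadu_epi64(k, &data[i]);
        aggregator = _mm512_add_epi64(aggregator, values);
    }

    // Horizontal reduction, matching the _mm512_reduce_add_epi64 call in aggr_j.
    return _mm512_reduce_add_epi64(aggregator);
}

int main() {
    alignas(64) uint64_t data[16];
    for (size_t i = 0; i < 16; i++) data[i] = i;

    const uint8_t mask_bits[2] = { 0b00001111, 0b11110000 };  // keep elements 0-3 and 12-15
    std::printf("%llu\n", static_cast<unsigned long long>(masked_sum(data, mask_bits, 16)));
    return 0;
}

Compiled with -mavx512f, the example prints 60: the sum of elements 0-3 and 12-15.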