@@ -72,6 +72,10 @@ constexpr size_t MASK_ELEMENT_SIZE = 16;
 
 constexpr size_t MASK_STEP_SIZE = CHUNK_SIZE_ELEMENTS / MASK_ELEMENT_SIZE;
 
 static_assert(RUN_COUNT > 0);
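+// scan_b threads split the aggr_j threads' prefetch work, so the counts must
+// divide evenly; workload and chunk sizes must be multiples of 16 bytes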
+static_assert(TC_SCANB <= TC_AGGRJ);
+static_assert(TC_AGGRJ % TC_SCANB == 0);
+static_assert(WL_SIZE_B % 16 == 0);
+static_assert(CHUNK_SIZE_B % 16 == 0);
 
 using filter = Filter<uint64_t, LT, load_mode::Stream, false>;
 using aggregation = Aggregation<uint64_t, Sum, load_mode::Stream>;
@@ -86,34 +90,25 @@ uint64_t* DATA_B_;
 uint16_t* MASK_A_;
 uint64_t* DATA_DST_;
 
-template<size_t TC>
-inline uint64_t get_chunk_index(const size_t gid, const size_t tid, const size_t rid) {
-    /*
-     * Calculates Chunk Index as follows:
-     * group_start = (chunk_count / group_count) * gid
-     * thread_start = (chunk_count / (group_count * thread_count)) * tid
-     * run_start = (chunk_count / (group_count * thread_count * run_count)) * rid
-     * index = group_start + thread_start + run_start
-     */
-
-    constexpr size_t TC_x_RC = TC * RUN_COUNT;
-    constexpr size_t GC_x_TC_x_RC = GROUP_COUNT * TC_x_RC;
-
-    const size_t index = (CHUNK_COUNT * (TC_x_RC * gid + RUN_COUNT * tid + rid)) / GC_x_TC_x_RC;
-
-    return index;
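+// chunks are striped across groups: run rid of group gid works on
+// chunk gid + GROUP_COUNT * rid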
+inline uint64_t get_chunk_index(const size_t gid, const size_t rid) {
+    return gid + GROUP_COUNT * rid;
 }
 
-inline uint64_t* get_chunk(uint64_t* base, const size_t chunk_index) {
-    return &base[chunk_index * CHUNK_SIZE_ELEMENTS];
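+// tid selects one of TC equal sub-chunks of CHUNK_SIZE_ELEMENTS / TC elements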
+template<size_t TC>
+inline uint64_t* get_chunk(uint64_t* base, const size_t chunk_index, const size_t tid) {
+    uint64_t* chunk_ptr = base + chunk_index * CHUNK_SIZE_ELEMENTS;
+    return chunk_ptr + tid * (CHUNK_SIZE_ELEMENTS / TC);
 }
 
-inline uint16_t* get_mask(uint16_t* base, const size_t chunk_index) {
-    return &base[chunk_index * MASK_STEP_SIZE];
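+// one 16-bit mask word covers 16 elements, hence the division by 16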
+template<size_t TC>
+inline uint16_t* get_mask(uint16_t* base, const size_t chunk_index, const size_t tid) {
+    size_t offset = chunk_index * CHUNK_SIZE_ELEMENTS + tid * (CHUNK_SIZE_ELEMENTS / TC);
+    return base + (offset / 16);
 }
 
 void scan_b(size_t gid, size_t tid) {
-    constexpr size_t split = RUN_COUNT / TC_SCANB;
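+    // each scan_b thread prefetches for TC_AGGRJ / TC_SCANB aggr_j threads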
+    constexpr size_t split = TC_AGGRJ / TC_SCANB;
 
     const size_t start = tid * split;
     const size_t end = start + split;
@@ -121,13 +116,14 @@ void scan_b(size_t gid, size_t tid) {
 
     if constexpr (PERFORM_CACHING) {
         for (size_t i = start; i < end; i++) {
-            const size_t chunk_index = get_chunk_index<TC_AGGRJ>(gid, tid, i);
-            uint64_t* chunk_ptr = get_chunk(DATA_B_, chunk_index);
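+            // i stands in for an aggr_j thread id: scan_b pre-caches the
+            // sub-chunk that aggr_j thread i of this group will read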
+            const size_t chunk_index = get_chunk_index(gid, 0);
+            uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, i);
 
             const auto data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), CHUNK_SIZE_B / TC_AGGRJ);
 
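+            // yield so other pipeline threads can run before blocking on
+            // completion of the asynchronous cache copy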
             sched_yield();
 
             data->WaitOnCompletion();
         }
     }
@@ -138,9 +134,9 @@ void scan_a(size_t gid, size_t tid) {
 
     LAUNCH_.wait();
 
     for (size_t i = 0; i < RUN_COUNT; i++) {
-        const size_t chunk_index = get_chunk_index<TC_SCANA>(gid, tid, i);
-        uint64_t* chunk_ptr = get_chunk(DATA_A_, chunk_index);
-        uint16_t* mask_ptr = get_mask(MASK_A_, chunk_index);
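+        // the chunk is chosen per run; tid then selects this thread's sub-chunk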
+        const size_t chunk_index = get_chunk_index(gid, i);
+        uint64_t* chunk_ptr = get_chunk<TC_SCANA>(DATA_A_, chunk_index, tid);
+        uint16_t* mask_ptr = get_mask<TC_SCANA>(MASK_A_, chunk_index, tid);
 
         filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_A, CHUNK_SIZE_B / TC_SCANA);
     }
@@ -156,9 +152,9 @@ void aggr_j(size_t gid, size_t tid) {
 
     BARRIERS_[gid]->arrive_and_wait();
 
     for (size_t i = 0; i < RUN_COUNT; i++) {
-        const size_t chunk_index = get_chunk_index<TC_SCANA>(gid, tid, i);
-        uint64_t* chunk_ptr = get_chunk(DATA_B_, chunk_index);
-        uint16_t* mask_ptr = get_mask(MASK_A_, chunk_index);
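+        // address by aggr_j's own thread count (TC_AGGRJ) instead of TC_SCANA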
+        const size_t chunk_index = get_chunk_index(gid, i);
+        uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
+        uint16_t* mask_ptr = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
 
         std::unique_ptr<dsacache::CacheData> data;
         uint64_t* data_ptr;