Browse Source

remove broken implementation for non-divisible chunk-group-thread-counts

master
Constantin Fürst 11 months ago
parent
commit
20c6e54df7
  1. 97
      qdp_project/src/Benchmark.cpp
  2. 15
      qdp_project/src/Configuration.hpp

97
qdp_project/src/Benchmark.cpp

@@ -55,15 +55,6 @@ void caching(size_t gid, size_t tid) {
CACHE_.Access(reinterpret_cast<uint8_t*>(sub_chunk_ptr), SUBCHUNK_SIZE_B); CACHE_.Access(reinterpret_cast<uint8_t*>(sub_chunk_ptr), SUBCHUNK_SIZE_B);
} }
} }
constexpr size_t LAST_CHUNK_SIZE_B = SUBCHUNK_SIZE_B + (CHUNK_SIZE_B % SUBCHUNK_COUNT);
if constexpr (LAST_CHUNK_SIZE_B > 0) {
if (gid == GROUP_COUNT - 1 && tid == TC_SCANB - 1) {
const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), LAST_CHUNK_SIZE_B);
}
}
} }
else if constexpr (CACHE_OVERCHUNKING) { else if constexpr (CACHE_OVERCHUNKING) {
for (size_t tid_virt = tid; tid_virt < TC_AGGRJ; tid_virt += VIRT_TID_INCREMENT) { for (size_t tid_virt = tid; tid_virt < TC_AGGRJ; tid_virt += VIRT_TID_INCREMENT) {
@@ -74,15 +65,6 @@ void caching(size_t gid, size_t tid) {
CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), CHUNK_SIZE_B); CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), CHUNK_SIZE_B);
} }
} }
constexpr size_t LAST_CHUNK_SIZE_B = CHUNK_SIZE_B + (CHUNK_SIZE_B % (TC_AGGRJ * GROUP_COUNT));
if constexpr (LAST_CHUNK_SIZE_B > 0) {
if (gid == GROUP_COUNT - 1 && tid == TC_SCANB - 1) {
const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
uint64_t *chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), LAST_CHUNK_SIZE_B);
}
}
} }
else { else {
for (size_t i = 0; i < RUN_COUNT; i++) { for (size_t i = 0; i < RUN_COUNT; i++) {
@@ -91,15 +73,6 @@ void caching(size_t gid, size_t tid) {
CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B); CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B);
} }
constexpr size_t LAST_CHUNK_SIZE_B = CHUNK_SIZE_B + (CHUNK_SIZE_B % ((TC_SCANB > 0 ? TC_SCANB : 1) * GROUP_COUNT));
if constexpr (LAST_CHUNK_SIZE_B > 0) {
if (gid == GROUP_COUNT - 1 && tid == TC_SCANB - 1) {
const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), LAST_CHUNK_SIZE_B);
}
}
} }
} }
} }
@@ -121,8 +94,6 @@ void scan_b(size_t gid, size_t tid) {
} }
void scan_a(size_t gid, size_t tid) { void scan_a(size_t gid, size_t tid) {
constexpr size_t LAST_CHUNK_SIZE_B = CHUNK_SIZE_B + (CHUNK_SIZE_B % (TC_SCANA * GROUP_COUNT));
THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)].clear(); THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)].clear();
THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)].resize(RUN_COUNT); THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)].resize(RUN_COUNT);
INTERNAL_TIMING_VECTOR_LOAD_[SCANA_TIMING_INDEX].clear(); INTERNAL_TIMING_VECTOR_LOAD_[SCANA_TIMING_INDEX].clear();
@@ -146,46 +117,12 @@ void scan_a(size_t gid, size_t tid) {
THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_END] = std::chrono::steady_clock::now(); THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
} }
if constexpr (LAST_CHUNK_SIZE_B > 0) {
if (gid == GROUP_COUNT - 1 && tid == TC_SCANB - 1) {
const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
uint64_t* chunk_ptr = get_chunk<TC_SCANA>(DATA_A_, chunk_index, tid);
uint16_t* mask_ptr = get_mask<TC_SCANA>(MASK_A_, chunk_index, tid);
uint64_t t_unused;
filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_A, LAST_CHUNK_SIZE_B, &t_unused);
}
}
BARRIERS_[gid]->arrive_and_drop(); BARRIERS_[gid]->arrive_and_drop();
} }
template <size_t size> template <size_t size>
__m512i AggrFn(uint64_t* chunk_ptr, uint16_t* mask_ptr_a, const uint32_t tid, const uint32_t gid, __m512i aggregator, uint64_t* load_time) { __m512i AggrFn(uint64_t* chunk_ptr, uint16_t* mask_ptr_a, const uint32_t tid, const uint32_t gid, __m512i aggregator, uint64_t* load_time) {
std::unique_ptr<dsacache::CacheData> data;
uint64_t* data_ptr;
if constexpr (PERFORM_CACHING) {
data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), size, dsacache::FLAG_ACCESS_WEAK);
data->WaitOnCompletion();
data_ptr = reinterpret_cast<uint64_t*>(data->GetDataLocation());
if (data_ptr == nullptr) {
data_ptr = chunk_ptr;
}
else if (data_ptr == chunk_ptr) {
// prevent counting weak-accesses
}
else {
CACHE_HITS_[UniqueIndex(gid,tid)]++;
}
}
else {
data_ptr = chunk_ptr;
}
uint64_t tmp = _mm512_reduce_add_epi64(aggregator);
return aggregation::apply_masked(aggregator, data_ptr, mask_ptr_a, size, load_time);
} }
void aggr_j(size_t gid, size_t tid) { void aggr_j(size_t gid, size_t tid) {
@@ -214,20 +151,34 @@ void aggr_j(size_t gid, size_t tid) {
uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid); uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
uint16_t* mask_ptr_a = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid); uint16_t* mask_ptr_a = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
aggregator = AggrFn<SUBCHUNK_SIZE_B>(chunk_ptr, mask_ptr_a, tid, gid, aggregator, &INTERNAL_TIMING_VECTOR_LOAD_[AGGRJ_TIMING_INDEX][i]);
std::unique_ptr<dsacache::CacheData> data;
uint64_t* data_ptr;
THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_END] = std::chrono::steady_clock::now();
}
if constexpr (PERFORM_CACHING) {
data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), SUBCHUNK_SIZE_B, dsacache::FLAG_ACCESS_WEAK);
data->WaitOnCompletion();
if constexpr (LAST_CHUNK_SIZE_B > 0) {
if (gid == GROUP_COUNT - 1 && tid == TC_AGGRJ - 1) {
const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
uint16_t* mask_ptr_a = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
uint64_t t_unused;
AggrFn<SUBCHUNK_SIZE_B>(chunk_ptr, mask_ptr_a, tid, gid, aggregator, &t_unused);
data_ptr = reinterpret_cast<uint64_t*>(data->GetDataLocation());
if (data_ptr == nullptr) {
data_ptr = chunk_ptr;
}
else if (data_ptr == chunk_ptr) {
// prevent counting weak-accesses
}
else {
CACHE_HITS_[UniqueIndex(gid,tid)]++;
} }
} }
else {
data_ptr = chunk_ptr;
}
uint64_t tmp = _mm512_reduce_add_epi64(aggregator);
aggregator = aggregation::apply_masked(aggregator, data_ptr, mask_ptr_a, SUBCHUNK_SIZE_B, &INTERNAL_TIMING_VECTOR_LOAD_[AGGRJ_TIMING_INDEX][i]);
THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_END] = std::chrono::steady_clock::now();
}
BARRIERS_[gid]->arrive_and_drop(); BARRIERS_[gid]->arrive_and_drop();

15
qdp_project/src/Configuration.hpp

@@ -13,20 +13,20 @@ constexpr int MEM_NODE_HBM = 8;
constexpr int MEM_NODE_DRAM = 0; constexpr int MEM_NODE_DRAM = 0;
#ifdef MODE_PREFETCH #ifdef MODE_PREFETCH
constexpr uint32_t GROUP_COUNT = 12;
constexpr uint32_t GROUP_COUNT = 8;
constexpr size_t CHUNK_SIZE_B = 16_MiB; constexpr size_t CHUNK_SIZE_B = 16_MiB;
constexpr uint32_t TC_SCANA = 1;
constexpr uint32_t TC_SCANA = 2;
constexpr uint32_t TC_SCANB = 1; constexpr uint32_t TC_SCANB = 1;
constexpr uint32_t TC_AGGRJ = 1; constexpr uint32_t TC_AGGRJ = 1;
constexpr bool PERFORM_CACHING = true; constexpr bool PERFORM_CACHING = true;
constexpr bool PERFORM_CACHING_IN_AGGREGATION = true;
constexpr bool PERFORM_CACHING_IN_AGGREGATION = false;
constexpr int MEM_NODE_A = 1; constexpr int MEM_NODE_A = 1;
constexpr int MEM_NODE_B = 2; constexpr int MEM_NODE_B = 2;
constexpr char MODE_STRING[] = "prefetch"; constexpr char MODE_STRING[] = "prefetch";
#endif #endif
#ifdef MODE_DRAM #ifdef MODE_DRAM
constexpr size_t CHUNK_SIZE_B = 2_MiB; constexpr size_t CHUNK_SIZE_B = 2_MiB;
constexpr uint32_t GROUP_COUNT = 12;
constexpr uint32_t GROUP_COUNT = 8;
constexpr uint32_t TC_SCANA = 2; constexpr uint32_t TC_SCANA = 2;
constexpr uint32_t TC_SCANB = 0; constexpr uint32_t TC_SCANB = 0;
constexpr uint32_t TC_AGGRJ = 1; constexpr uint32_t TC_AGGRJ = 1;
@@ -38,7 +38,7 @@ constexpr char MODE_STRING[] = "dram";
#endif #endif
#ifdef MODE_HBM #ifdef MODE_HBM
constexpr size_t CHUNK_SIZE_B = 2_MiB; constexpr size_t CHUNK_SIZE_B = 2_MiB;
constexpr uint32_t GROUP_COUNT = 12;
constexpr uint32_t GROUP_COUNT = 8;
constexpr uint32_t TC_SCANA = 2; constexpr uint32_t TC_SCANA = 2;
constexpr uint32_t TC_SCANB = 0; constexpr uint32_t TC_SCANB = 0;
constexpr uint32_t TC_AGGRJ = 1; constexpr uint32_t TC_AGGRJ = 1;
@@ -58,3 +58,8 @@ constexpr size_t RUN_COUNT = CHUNK_COUNT / GROUP_COUNT;
static_assert(RUN_COUNT > 0); static_assert(RUN_COUNT > 0);
static_assert(WL_SIZE_B % 16 == 0); static_assert(WL_SIZE_B % 16 == 0);
static_assert(CHUNK_SIZE_B % 16 == 0); static_assert(CHUNK_SIZE_B % 16 == 0);
static_assert(CHUNK_SIZE_B % GROUP_COUNT == 0);
static_assert(CHUNK_SIZE_B % TC_AGGRJ == 0);
static_assert(CHUNK_SIZE_B % TC_SCANB == 0);
static_assert(CHUNK_SIZE_B % TC_SCANA == 0);
Loading…
Cancel
Save