@@ -17,12 +17,11 @@
 #include "Configuration.hpp"
 #include "BenchmarkHelpers.cpp"
 
-using filter = Filter<uint64_t, LT, load_mode::Stream, false>;
-using aggregation = Aggregation<uint64_t, Sum, load_mode::Stream>;
+using filter = FilterLT<uint64_t, load_mode::Stream>;
+using aggregation = AggregationSUM<uint64_t, load_mode::Stream>;
 
 dsacache::Cache CACHE_;
 
-std::array<std::atomic<int32_t>, GROUP_COUNT> PREFETCHED_CHUNKS_;
 std::vector<std::barrier<NopStruct>*> BARRIERS_;
 std::shared_future<void> LAUNCH_;
 
@@ -46,8 +45,6 @@ void caching(size_t gid, size_t tid) {
         constexpr size_t SUBCHUNK_COUNT = SUBCHUNK_THREAD_RATIO > 0 ? SUBCHUNK_THREAD_RATIO : 1;
         constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / SUBCHUNK_COUNT;
         constexpr size_t SUBCHUNK_SIZE_ELEMENTS = CHUNK_SIZE_ELEMENTS / SUBCHUNK_COUNT;
-        constexpr size_t LAST_SUBCHUNK_SIZE_B = SUBCHUNK_SIZE_B + (CHUNK_SIZE_B % SUBCHUNK_COUNT);
-        // TODO: last thread (whether simulated or not) in last group must use last subchunk size for its last subchunk in the last run as size
 
         for (size_t i = 0; i < RUN_COUNT; i++) {
             const size_t chunk_index = get_chunk_index(gid, i);
@@ -55,42 +52,53 @@ void caching(size_t gid, size_t tid) {
 
             for (size_t j = 0; j < SUBCHUNK_COUNT; j++) {
                 uint64_t* sub_chunk_ptr = &chunk_ptr[j * SUBCHUNK_SIZE_ELEMENTS];
 
                 CACHE_.Access(reinterpret_cast<uint8_t*>(sub_chunk_ptr), SUBCHUNK_SIZE_B);
+            }
+        }
-
-                PREFETCHED_CHUNKS_[gid]++;
-                PREFETCHED_CHUNKS_[gid].notify_one();
+        constexpr size_t LAST_CHUNK_SIZE_B = SUBCHUNK_SIZE_B + (CHUNK_SIZE_B % SUBCHUNK_COUNT);
+        if constexpr (LAST_CHUNK_SIZE_B > 0) {
+            if (gid == GROUP_COUNT - 1 && tid == TC_SCANB - 1) {
+                const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
+                uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
+                CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), LAST_CHUNK_SIZE_B);
             }
         }
     }
     else if constexpr (CACHE_OVERCHUNKING) {
-        constexpr size_t LAST_CHUNK_SIZE_B = CHUNK_SIZE_B + (CHUNK_SIZE_B % (TC_AGGRJ * GROUP_COUNT));
-        // TODO: last thread (whether simulated or not) in last group must use last chunk size for its last run as size
 
         for (size_t tid_virt = tid; tid_virt < TC_AGGRJ; tid_virt += VIRT_TID_INCREMENT) {
             for (size_t i = 0; i < RUN_COUNT; i++) {
                 const size_t chunk_index = get_chunk_index(gid, i);
                 uint64_t *chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid_virt);
 
                 CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), CHUNK_SIZE_B);
+            }
+        }
-
-                PREFETCHED_CHUNKS_[gid]++;
-                PREFETCHED_CHUNKS_[gid].notify_one();
+        constexpr size_t LAST_CHUNK_SIZE_B = CHUNK_SIZE_B + (CHUNK_SIZE_B % (TC_AGGRJ * GROUP_COUNT));
+        if constexpr (LAST_CHUNK_SIZE_B > 0) {
+            if (gid == GROUP_COUNT - 1 && tid == TC_SCANB - 1) {
+                const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
+                uint64_t *chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
+                CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), LAST_CHUNK_SIZE_B);
             }
         }
     }
     else {
-        constexpr size_t LAST_CHUNK_SIZE_B = CHUNK_SIZE_B + (CHUNK_SIZE_B % ((TC_SCANB > 0 ? TC_SCANB : 1) * GROUP_COUNT));
-        // TODO: last thread in last group must use last chunk size for its last run as size
 
         for (size_t i = 0; i < RUN_COUNT; i++) {
             const size_t chunk_index = get_chunk_index(gid, i);
             uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
 
             CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), CHUNK_SIZE_B);
+        }
-
-            PREFETCHED_CHUNKS_[gid]++;
-            PREFETCHED_CHUNKS_[gid].notify_one();
+        constexpr size_t LAST_CHUNK_SIZE_B = CHUNK_SIZE_B + (CHUNK_SIZE_B % ((TC_SCANB > 0 ? TC_SCANB : 1) * GROUP_COUNT));
+        if constexpr (LAST_CHUNK_SIZE_B > 0) {
+            if (gid == GROUP_COUNT - 1 && tid == TC_SCANB - 1) {
+                const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
+                uint64_t* chunk_ptr = get_chunk<TC_SCANB>(DATA_B_, chunk_index, tid);
+                CACHE_.Access(reinterpret_cast<uint8_t*>(chunk_ptr), LAST_CHUNK_SIZE_B);
+            }
         }
     }
 }
@@ -114,69 +122,53 @@ void scan_b(size_t gid, size_t tid) {
 
 void scan_a(size_t gid, size_t tid) {
     constexpr size_t LAST_CHUNK_SIZE_B = CHUNK_SIZE_B + (CHUNK_SIZE_B % (TC_SCANA * GROUP_COUNT));
-    // TODO: last thread in last group must use last chunk size for its last run as size
 
     THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)].clear();
-    THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)].resize(1);
+    THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)].resize(RUN_COUNT);
+    INTERNAL_TIMING_VECTOR_LOAD_[SCANA_TIMING_INDEX].clear();
+    INTERNAL_TIMING_VECTOR_LOAD_[SCANA_TIMING_INDEX].resize(RUN_COUNT);
 
     LAUNCH_.wait();
 
+    for (size_t i = 0; i < RUN_COUNT; i++) {
         THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
 
-    for (size_t i = 0; i < RUN_COUNT; i++) {
         const size_t chunk_index = get_chunk_index(gid, i);
         uint64_t* chunk_ptr = get_chunk<TC_SCANA>(DATA_A_, chunk_index, tid);
         uint16_t* mask_ptr = get_mask<TC_SCANA>(MASK_A_, chunk_index, tid);
 
-        filter::apply_same(mask_ptr, nullptr, chunk_ptr, CMP_A, CHUNK_SIZE_B / TC_SCANA);
+        const auto internal_timing = filter::apply_same<CMP_A, CHUNK_SIZE_B / TC_SCANA>(mask_ptr, chunk_ptr);
+        INTERNAL_TIMING_VECTOR_LOAD_[SCANA_TIMING_INDEX][i] = internal_timing;
+
+        THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
 
         BARRIERS_[gid]->arrive_and_wait();
-    }
-
-    THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
         THREAD_TIMING_[SCANA_TIMING_INDEX][UniqueIndex(gid,tid)][0][TIME_STAMP_END] = std::chrono::steady_clock::now();
 
-    BARRIERS_[gid]->arrive_and_drop();
     }
 
-void aggr_j(size_t gid, size_t tid) {
-    constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / TC_AGGRJ;
-    constexpr size_t LAST_CHUNK_SIZE_B = SUBCHUNK_SIZE_B + (CHUNK_SIZE_B % (TC_AGGRJ * GROUP_COUNT));
-    // TODO: last thread in last group must use last chunk size for its last run as size
-
-    CACHE_HITS_[UniqueIndex(gid,tid)] = 0;
-
-    THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)].clear();
-    THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)].resize(RUN_COUNT);
-
-    __m512i aggregator = aggregation::OP::zero();
-
-    LAUNCH_.wait();
-
-    for (size_t i = 0; i < RUN_COUNT; i++) {
-        THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
-
-        BARRIERS_[gid]->arrive_and_wait();
-
-        while (true) {
-            const int32_t old = PREFETCHED_CHUNKS_[gid].fetch_sub(1);
-            if (old > 0) break;
-            PREFETCHED_CHUNKS_[gid]++;
-            PREFETCHED_CHUNKS_[gid].wait(0);
+    if constexpr (LAST_CHUNK_SIZE_B > 0) {
+        if (gid == GROUP_COUNT - 1 && tid == TC_SCANB - 1) {
+            const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
+            uint64_t* chunk_ptr = get_chunk<TC_SCANA>(DATA_A_, chunk_index, tid);
+            uint16_t* mask_ptr = get_mask<TC_SCANA>(MASK_A_, chunk_index, tid);
+            filter::apply_same<CMP_A, LAST_CHUNK_SIZE_B>(mask_ptr, chunk_ptr);
+        }
     }
 
-        THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
+    BARRIERS_[gid]->arrive_and_drop();
+}
 
-        const size_t chunk_index = get_chunk_index(gid, i);
-        uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
-        uint16_t* mask_ptr_a = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
+template <size_t size>
+uint64_t AggrFn(uint64_t* chunk_ptr, uint16_t* mask_ptr_a, const uint32_t tid, const uint32_t gid, __m512i& aggregator) {
 
     std::unique_ptr<dsacache::CacheData> data;
     uint64_t* data_ptr;
 
     if constexpr (PERFORM_CACHING) {
-        data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), SUBCHUNK_SIZE_B, dsacache::FLAG_ACCESS_WEAK);
+        data = CACHE_.Access(reinterpret_cast<uint8_t *>(chunk_ptr), size, dsacache::FLAG_ACCESS_WEAK);
         data->WaitOnCompletion();
 
         data_ptr = reinterpret_cast<uint64_t*>(data->GetDataLocation());
 
         if (data_ptr == nullptr) {
@@ -194,11 +186,50 @@ void aggr_j(size_t gid, size_t tid) {
     }
 
     uint64_t tmp = _mm512_reduce_add_epi64(aggregator);
-    aggregator = aggregation::apply_masked(aggregator, data_ptr, mask_ptr_a, SUBCHUNK_SIZE_B);
+    return aggregation::apply_masked<size>(aggregator, data_ptr, mask_ptr_a);
+}
+
+void aggr_j(size_t gid, size_t tid) {
+    constexpr size_t SUBCHUNK_SIZE_B = CHUNK_SIZE_B / TC_AGGRJ;
+    constexpr size_t LAST_CHUNK_SIZE_B = SUBCHUNK_SIZE_B + (CHUNK_SIZE_B % (TC_AGGRJ * GROUP_COUNT));
+
+    CACHE_HITS_[UniqueIndex(gid,tid)] = 0;
+
+    THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)].clear();
+    THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)].resize(RUN_COUNT);
+    INTERNAL_TIMING_VECTOR_LOAD_[AGGRJ_TIMING_INDEX].clear();
+    INTERNAL_TIMING_VECTOR_LOAD_[AGGRJ_TIMING_INDEX].resize(RUN_COUNT);
+
+    __m512i aggregator = aggregation::zero();
+
+    LAUNCH_.wait();
+
+    for (size_t i = 0; i < RUN_COUNT; i++) {
+        THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_BEGIN] = std::chrono::steady_clock::now();
+
+        BARRIERS_[gid]->arrive_and_wait();
+
+        THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_WAIT] = std::chrono::steady_clock::now();
+
+        const size_t chunk_index = get_chunk_index(gid, i);
+        uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
+        uint16_t* mask_ptr_a = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
+
+        const auto internal_timing = AggrFn<SUBCHUNK_SIZE_B>(chunk_ptr, mask_ptr_a, tid, gid, aggregator);
+        INTERNAL_TIMING_VECTOR_LOAD_[AGGRJ_TIMING_INDEX][i] = internal_timing;
 
         THREAD_TIMING_[AGGRJ_TIMING_INDEX][UniqueIndex(gid,tid)][i][TIME_STAMP_END] = std::chrono::steady_clock::now();
     }
+
+    if constexpr (LAST_CHUNK_SIZE_B > 0) {
+        if (gid == GROUP_COUNT - 1 && tid == TC_AGGRJ - 1) {
+            const size_t chunk_index = get_chunk_index(gid, RUN_COUNT + 1);
+            uint64_t* chunk_ptr = get_chunk<TC_AGGRJ>(DATA_B_, chunk_index, tid);
+            uint16_t* mask_ptr_a = get_mask<TC_AGGRJ>(MASK_A_, chunk_index, tid);
+            AggrFn<SUBCHUNK_SIZE_B>(chunk_ptr, mask_ptr_a, tid, gid, aggregator);
+        }
+    }
 
     BARRIERS_[gid]->arrive_and_drop();
 
     aggregation::happly(&DATA_DST_[UniqueIndex(gid,tid)], aggregator);
@@ -219,7 +250,7 @@ int main() {
     const std::string ofname = "results/qdp-xeonmax-" + std::string(MODE_STRING) + "-tca" + std::to_string(TC_SCANA) + "-tcb" + std::to_string(TC_SCANB) + "-tcj" + std::to_string(TC_AGGRJ) + "-tmul" + std::to_string(GROUP_COUNT) + "-wl" + std::to_string(WL_SIZE_B) + "-cs" + std::to_string(CHUNK_SIZE_B) + ".csv";
     std::ofstream fout(ofname);
 
-    fout << "run;rt-ns;rt-s;result[0];scana-run;scana-wait;scanb-run;scanb-wait;aggrj-run;aggrj-wait;cache-hr;" << std::endl;
+    fout << "run;rt-ns;rt-s;result[0];scana-run;scana-wait;scana-load;scanb-run;scanb-wait;aggrj-run;aggrj-wait;aggrj-load;cache-hr;" << std::endl;
 
     DATA_A_ = (uint64_t*) numa_alloc_onnode(WL_SIZE_B, MEM_NODE_A);
     DATA_B_ = (uint64_t*) numa_alloc_onnode(WL_SIZE_B, MEM_NODE_B);
@@ -274,7 +305,7 @@ int main() {
         for(std::thread& t : agg_pool) { t.join(); }
 
         uint64_t result_actual = 0;
-        Aggregation<uint64_t, Sum, load_mode::Aligned>::apply(&result_actual, DATA_DST_, sizeof(uint64_t) * TC_AGGRJ * GROUP_COUNT);
+        aggregation::apply<sizeof(uint64_t) * TC_AGGRJ * GROUP_COUNT>(&result_actual, DATA_DST_);
 
         const auto time_end = std::chrono::steady_clock::now();
 
@@ -283,8 +314,8 @@ int main() {
         std::cout << "Result Expected: " << result_expected << ", Result Actual: " << result_actual << std::endl;
 
         if (i >= WARMUP_ITERATION_COUNT) {
-            uint64_t scana_run = 0, scana_wait = 0, scanb_run = 0, scanb_wait = 0, aggrj_run = 0, aggrj_wait = 0;
-            process_timings(&scana_run, &scana_wait, &scanb_run, &scanb_wait, &aggrj_run, &aggrj_wait);
+            uint64_t scana_run = 0, scana_wait = 0, scanb_run = 0, scanb_wait = 0, aggrj_run = 0, aggrj_wait = 0, scana_load = 0, aggrj_load = 0;
+            process_timings(&scana_run, &scana_wait, &scanb_run, &scanb_wait, &aggrj_run, &aggrj_wait, &scana_load, &aggrj_load);
 
             constexpr double nanos_per_second = ((double)1000) * 1000 * 1000;
             const uint64_t nanos = std::chrono::duration_cast<std::chrono::nanoseconds>(time_end - time_start).count();
@@ -294,7 +325,7 @@
                 << i - WARMUP_ITERATION_COUNT << ";"
                 << nanos << ";" << seconds << ";"
                 << result_actual << ";"
-                << scana_run << ";" << scana_wait << ";" << scanb_run << ";" << scanb_wait << ";" << aggrj_run << ";" << aggrj_wait << ";"
+                << scana_run << ";" << scana_wait << ";" << scana_load << ";" << scanb_run << ";" << scanb_wait << ";" << aggrj_run << ";" << aggrj_wait << ";" << aggrj_load << ";"
                 << process_cache_hitrate() << ";"
                 << std::endl;
         }