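# Module overview (added summary comment; wording inferred from the plot title,
# description strings and file paths used below):
# Reads the timing measurements for each submission type (batch size 10,
# batch size 50, single submit) and task size from benchmark-results/,
# converts them to throughput in GiB/s and renders a grouped bar plot,
# saved to benchmark-plots/plot-opt-submitmethod.pdf.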
import os
from numpy import float64
from typing import List

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from common import calc_throughput, index_from_element, load_time_mesurements
runid = "Run ID" x_label = "Size of Submitted Task" y_label = "Throughput in GiB/s" var_label = "Submission Type" sizes = ["1kib", "4kib", "1mib", "128mib"] sizes_nice = ["1 KiB", "4 KiB", "1 MiB", "128 MiB"] types = ["bs10", "bs50", "ssaw"] types_nice = ["Batch, Size 10", "Batch, Size 50", "Single Submit"]
title = \
    """Throughput showing Optimal Submission Method and Size\n
Copy Operation tested Intra-Node on DDR with 1 Engine per WQ"""

description = \
    """Throughput showing Optimal Submission Method and Size\n
Batch uses a Batch Descriptor of given Size\n
Multi-Submit fills the Work Queue with n Single Descriptors\n
Single-Submit submits one Descriptor and immediately waits\n
Copy Operation tested Intra-Node on DDR with 1 Engine per WQ"""
index = [runid, x_label, var_label]
data = []
# Loads the measurements from a given file and normalizes them,
# so that the timings returned are nanoseconds per element transferred.
# Batch timings are therefore divided by the batch size.
def get_timing(file_path, type_label) -> List[float64]:
    if type_label == "bs10":
        divisor = 10
    elif type_label == "bs50":
        divisor = 50
    else:
        divisor = 1

    return [x / divisor for x in load_time_mesurements(file_path)]
# Processes a single file and appends the desired timings
# to the global data array. Handles multiple runs via a run id
# and ignores a missing file, as some configurations
# may not be benchmarked.
def process_file_to_dataset(file_path, type_label, size_label):
    type_index = index_from_element(type_label, types)
    type_nice = types_nice[type_index]
    size_index = index_from_element(size_label, sizes)
    size_nice = sizes_nice[size_index]

    if size_label == "1kib":
        data_size = 1024
    elif size_label == "4kib":
        data_size = 4 * 1024
    elif size_label == "1mib":
        data_size = 1024 * 1024
    elif size_label == "32mib":
        data_size = 32 * 1024 * 1024
    elif size_label == "128mib":
        data_size = 128 * 1024 * 1024
    elif size_label == "1gib":
        data_size = 1024 * 1024 * 1024
    else:
        data_size = 0

    try:
        time = get_timing(file_path, type_label)
        run_idx = 0
        for t in time:
            data.append({
                runid: run_idx,
                x_label: size_nice,
                var_label: type_nice,
                y_label: calc_throughput(data_size, t)
            })
            run_idx = run_idx + 1
    except FileNotFoundError:
        return
# Loops over all possible configuration combinations and calls
# process_file_to_dataset for each of them in order to build a dataframe,
# which is then plotted, displayed and saved.
def main():
    result_path = "benchmark-results/"
    output_path = "benchmark-plots/"

    for type_label in types:
        for size in sizes:
            file = os.path.join(result_path, f"submit-{type_label}-{size}-1e.json")
            process_file_to_dataset(file, type_label, size)

    df = pd.DataFrame(data)
    df.set_index(index, inplace=True)
    df = df.sort_values(y_label)

    sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")

    plt.savefig(os.path.join(output_path, "plot-opt-submitmethod.pdf"), bbox_inches='tight')
    plt.show()
if __name__ == "__main__":
    main()