import os
import json
import pandas as pd
from itertools import chain
import seaborn as sns
import matplotlib.pyplot as plt

from common import calc_throughput, index_from_element
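# Note: the helpers imported from the local common module are not shown here.
# Judging from the call sites below they are assumed to behave as follows
# (an assumption, not a verified interface):
#   index_from_element(element, lst) -> index of element within lst
#   calc_throughput(size_bytes, time_ns) -> throughput in GiB/s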
runid = "Run ID" x_label = "Thread Count" y_label = "Throughput in GiB/s" var_label = "Transfer Size" thread_counts = ["1t", "2t", "12t"] thread_counts_nice = ["1 Thread", "2 Threads", "12 Threads"] engine_counts = ["1mib-1e", "1gib-1e"] engine_counts_nice = ["1 MiB", "1 GiB"]
title = \
"""Total Throughput showing cost of MT Submit\n
Copying 120x split on n Threads Intra-Node on DDR\n
"""

description = \
"""Total Throughput showing cost of MT Submit\n
Running 120 Copy Operations split on n Threads\n
Copying Intra-Node on DDR performed for multiple Configurations\n
"""
index = [runid, x_label, var_label]
data = []
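# Sketch of the input JSON layout the loader below expects, reconstructed from
# its field accesses (an assumption, not a verified schema):
#
# {
#   "count": <number of benchmarked tasks>,
#   "list": [
#     {
#       "task":   { "iterations": <int>, ... },
#       "report": { "time": { "total": <ns>,
#                             "combined":   [<ns>, ...],
#                             "submission": [<ns>, ...],
#                             "completion": [<ns>, ...] } }
#     },
#     ...
#   ]
# }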
# loads the measurements from a given file and processes them
# so that they are normalized, meaning that the timings returned
# are nanoseconds per element transferred
def load_time_mesurements(file_path):
    with open(file_path, 'r') as file:
        data = json.load(file)

        count = data["count"]
        iterations = data["list"][0]["task"]["iterations"]

        # the work queue holds 120 elements split over all available threads,
        # so each reported timing is divided by 120 to normalize it to a
        # per-element value
        return {
            "total": sum([x / (iterations * 120) for x in list(chain([data["list"][i]["report"]["time"]["total"] for i in range(count)]))]),
            "combined": [x / 120 for x in list(chain(*[data["list"][i]["report"]["time"]["combined"] for i in range(count)]))],
            "submission": [x / 120 for x in list(chain(*[data["list"][i]["report"]["time"]["submission"] for i in range(count)]))],
            "completion": [x / 120 for x in list(chain(*[data["list"][i]["report"]["time"]["completion"] for i in range(count)]))]
        }
# processes a single file and appends the desired timings
# to the global data array, handles multiple runs with a run id
# and ignores the given file if it is not found, as some
# configurations may not be benchmarked
def process_file_to_dataset(file_path, engine_label, thread_count):
    engine_index = index_from_element(engine_label, engine_counts)
    engine_nice = engine_counts_nice[engine_index]
    threadc_index = index_from_element(thread_count, thread_counts)
    thread_count_nice = thread_counts_nice[threadc_index]

    if engine_label in ["1gib-1e", "1gib-4e"]:
        data_size = 1024 * 1024 * 1024
    elif engine_label in ["1mib-1e", "1mib-4e"]:
        data_size = 1024 * 1024
    else:
        data_size = 0

    try:
        time = load_time_mesurements(file_path)["combined"]
        run_idx = 0
        for t in time:
            data.append({
                runid: run_idx,
                x_label: thread_count_nice,
                var_label: engine_nice,
                y_label: calc_throughput(data_size, t)
            })
            run_idx = run_idx + 1
    except FileNotFoundError:
        return
# loops over all possible configuration combinations and calls
# process_file_to_dataset for them in order to build a dataframe
# which is then displayed and saved
def main():
    result_path = "benchmark-results/"
    output_path = "benchmark-plots/"

    for engine_label in engine_counts:
        for thread_count in thread_counts:
            file = os.path.join(result_path, f"mtsubmit-{thread_count}-{engine_label}.json")
            process_file_to_dataset(file, engine_label, thread_count)

    df = pd.DataFrame(data)
    df.set_index(index, inplace=True)

    sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")

    plt.savefig(os.path.join(output_path, "plot-perf-mtsubmit.png"), bbox_inches='tight')
    plt.show()


if __name__ == "__main__":
    main()
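# Usage sketch (assumptions about the working directory, not verified): run the
# script from the directory containing benchmark-results/ once the mtsubmit
# measurements exist, e.g. benchmark-results/mtsubmit-2t-1gib-1e.json.
# The plot is then written to benchmark-plots/plot-perf-mtsubmit.png.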