import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from common import calc_throughput, load_time_mesurements, get_task_count

result_path = "benchmark-results/"
output_path = "benchmark-plots/"

runid = "Run ID"
x_label = "Destination Node"
y_label = "Throughput in GiB/s"

title_allnodes = \
    """Copy Throughput in GiB/s tested for 1GiB Elements\n
    Using all 8 DSA Chiplets available on the System"""
title_smartnodes = \
    """Copy Throughput in GiB/s tested for 1GiB Elements\n
    Using Cross-Copy for Intersocket and all 4 Chiplets of Socket for Intrasocket"""
title_difference = \
    """Gain in Copy Throughput in GiB/s of All-DSA vs. Smart Assignment"""

description_smartnodes = \
    """Copy Throughput in GiB/s tested for 1GiB Elements\n
    Nodes of {8...15} are HBM accessors for their counterparts (minus 8)\n
    Using all 4 DSA Chiplets of a Socket for Intra-Socket Operation\n
    And using only the Source and Destination Nodes DSA for Inter-Socket"""
description_allnodes = \
    """Copy Throughput in GiB/s tested for 1GiB Elements\n
    Nodes of {8...15} are HBM accessors for their counterparts (minus 8)\n
    Using all 8 DSA Chiplets available on the System"""

index = [runid, x_label, y_label]
data = []
data_avg = {}


# loads the measurements from a given file and processes them
# so that they are normalized, meaning that the timings returned
# are nanoseconds per element transferred
def get_timing(file_path):
    divisor = get_task_count(file_path)
    return [x / divisor for x in load_time_mesurements(file_path)]


# processes a single file and appends the desired timings
# to the global data array, handles multiple runs with a runid
# and ignores if the given file is not found as some
# configurations may not be benchmarked
def process_file_to_dataset(file_path, config, dst_node):
    size = 1024 * 1024 * 1024

    if config not in data_avg:
        data_avg[config] = 0

    timing = get_timing(file_path)
    run_idx = 0
    for t in timing:
        tp = calc_throughput(size, t)
        data_avg[config] += tp / len(timing)
        data.append({runid: run_idx, x_label: dst_node, y_label: tp})
        run_idx = run_idx + 1


# plots the given dataframe as a bar chart and saves it as a pdf
# named after the given node configuration; the data columns are
# always keyed by x_label and y_label, while the displayed axis
# labels are overridden with display_x and display_y
def plot_bar(table, node_config, display_x, display_y):
    plt.figure(figsize=(2, 3))

    sns.barplot(x=x_label, y=y_label, data=table, palette="mako", errorbar="sd")

    plt.ylim(0, 70)
    plt.yticks([15, 30, 45, 60, 65])
    plt.xlabel(display_x)
    plt.ylabel(display_y)
    plt.savefig(os.path.join(output_path, f"plot-{node_config}-throughput.pdf"), bbox_inches='tight')


# plots the hard-coded peak throughput results from Andre's
# benchmark, which serve as a reference point
def PlotAndrePeakResults():
    data_peakbench_andre = [
        {runid: 0, x_label: 8, y_label: 64},
        {runid: 0, x_label: 11, y_label: 63},
        {runid: 0, x_label: 12, y_label: 40},
        {runid: 0, x_label: 15, y_label: 54},
    ]

    df = pd.DataFrame(data_peakbench_andre)
    df.set_index(index, inplace=True)

    plot_bar(df, "andrepeak", x_label, y_label)

    return df


# processes the result files of all destination nodes for the
# given node configuration via process_file_to_dataset in order
# to build a dataframe, which is then plotted and saved
def main(node_config):
    dst_nodes = {8, 11, 12, 15}

    for dst_node in dst_nodes:
        file = os.path.join(result_path, f"copy-n0ton{dst_node}-1gib-{node_config}.json")
        process_file_to_dataset(file, node_config, dst_node)

    data_avg[node_config] = data_avg[node_config] / len(dst_nodes)

    df = pd.DataFrame(data)
    data.clear()
    df.set_index(index, inplace=True)

    plot_bar(df, node_config, x_label, y_label)

    return df


# computes the scaling factor: the relative throughput gain of
# topline over baseline, divided by the utilization factor
# (the factor by which the DSA count was increased)
def get_scaling_factor(baseline, topline, utilfactor):
    return (topline / baseline) * (1 / utilfactor)


if __name__ == "__main__":
    # plot per-destination-node throughput for each node configuration
    dsa_df1 = main("1dsa")
    dsa_df2 = main("2dsa")
    dsa_df4 = main("4dsa")
    dsa_df8 = main("8dsa")
    cpu_df8 = main("8cpu")
    cpu_dfandre = PlotAndrePeakResults()

    x_dsacount = "Count of DSAs"
    y_avgtp = "Average Throughput in GiB/s"
    y_scaling = "Scaling Factor"

    # plot the average throughput per DSA count
    data_average = [
        {runid: 0, x_label: 1, y_label: data_avg["1dsa"]},
        {runid: 0, x_label: 2, y_label: data_avg["2dsa"]},
        {runid: 0, x_label: 4, y_label: data_avg["4dsa"]},
        {runid: 0, x_label: 8, y_label: data_avg["8dsa"]},
    ]

    average_df = pd.DataFrame(data_average)
    average_df.set_index(index, inplace=True)

    plot_bar(average_df, "average", x_dsacount, y_avgtp)

    # plot the scaling factor over the DSA count
    data_scaling = [
        {x_dsacount: 1, y_scaling: get_scaling_factor(data_avg["1dsa"], data_avg["1dsa"], 1)},
        {x_dsacount: 2, y_scaling: get_scaling_factor(data_avg["1dsa"], data_avg["2dsa"], 2)},
        {x_dsacount: 4, y_scaling: get_scaling_factor(data_avg["1dsa"], data_avg["4dsa"], 4)},
        {x_dsacount: 8, y_scaling: get_scaling_factor(data_avg["1dsa"], data_avg["8dsa"], 8)},
    ]

    scaling_df = pd.DataFrame(data_scaling)

    plt.figure(figsize=(2, 3))

    sns.lineplot(x=x_dsacount, y=y_scaling, data=scaling_df, marker='o', linestyle='-', color='b', markersize=8)

    plt.xticks([1, 2, 4, 8])
    plt.yticks([0.25, 0.5, 0.75, 1.0])
    plt.xlim(0, 10)
    plt.ylim(0.2, 1.05)

    plt.savefig(os.path.join(output_path, "plot-dsa-throughput-scaling.pdf"), bbox_inches='tight')