import os
import json
import pandas as pd
from itertools import chain
import seaborn as sns
import matplotlib.pyplot as plt

from common import calc_throughput

folder_path = "benchmark-results/"

runid = "Run ID"
x_label = "Destination Node"
y_label = "Source Node"
v_label = "Throughput"

title_allnodes = \
    """Copy Throughput in GiB/s tested for 1GiB Elements\n
    Using all 8 DSA Chiplets available on the System"""
title_smartnodes = \
    """Copy Throughput in GiB/s tested for 1GiB Elements\n
    Using Cross-Copy for Intersocket and all 4 Chiplets of Socket for Intrasocket"""
title_difference = \
    """Gain in Copy Throughput in GiB/s of All-DSA vs. Smart Assignment"""

description_smartnodes = \
    """Copy Throughput in GiB/s tested for 1GiB Elements\n
    Nodes of {8...15} are HBM accessors for their counterparts (minus 8)\n
    Using all 4 DSA Chiplets of a Socket for Intra-Socket Operation\n
    And using only the Source and Destination Nodes DSA for Inter-Socket"""
description_allnodes = \
    """Copy Throughput in GiB/s tested for 1GiB Elements\n
    Nodes of {8...15} are HBM accessors for their counterparts (minus 8)\n
    Using all 8 DSA Chiplets available on the System"""

index = [runid, x_label, y_label]
data = []


# loads the measurements from a given file and normalizes them,
# meaning that the timings returned are nanoseconds per element transferred
def load_time_measurements(file_path):
    with open(file_path, 'r') as file:
        measurements = json.load(file)

    count = measurements["count"]
    task = measurements["list"][0]["task"]
    # a batch size of 0 marks an unbatched run, which divides like a batch of 1
    batch_size = task["batching"]["batch_size"] if task["batching"]["batch_size"] > 0 else 1
    iterations = task["iterations"]

    return {
        "size": task["size"],
        "total": sum(
            measurements["list"][i]["report"]["time"]["total"] / (iterations * batch_size * count * count)
            for i in range(count)
        ),
        "combined": [
            x / (count * batch_size)
            for x in chain(*[measurements["list"][i]["report"]["time"]["combined"] for i in range(count)])
        ],
        "submission": [
            x / (count * batch_size)
            for x in chain(*[measurements["list"][i]["report"]["time"]["submission"] for i in range(count)])
        ],
        "completion": [
            x / (count * batch_size)
            for x in chain(*[measurements["list"][i]["report"]["time"]["completion"] for i in range(count)])
        ]
    }


# processes a single file and appends the desired timings
# to the global data-array, handles multiple runs with a runid
# and ignores a missing file, as some configurations
# may not have been benchmarked
def process_file_to_dataset(file_path, src_node, dst_node):
    try:
        file_data = load_time_measurements(file_path)
        times = [file_data["total"]]

        for run_idx, t in enumerate(times):
            data.append({
                runid: run_idx,
                x_label: dst_node,
                y_label: src_node,
                v_label: calc_throughput(file_data["size"], t)
            })
    except FileNotFoundError:
        return


# renders the given pivot table as an annotated heatmap and saves it
# next to the benchmark results before displaying it
def plot_heatmap(table, title, node_config):
    plt.figure(figsize=(8, 6))
    sns.heatmap(table, annot=True, cmap="YlGn", fmt=".0f")
    plt.title(title)
    plt.savefig(os.path.join(folder_path, f"plot-perf-{node_config}-throughput.png"), bbox_inches='tight')
    plt.show()


# loops over all possible configuration combinations and calls
# process_file_to_dataset for them in order to build a dataframe
# which is then displayed and saved
def main(node_config, title):
    for src_node in range(16):
        for dst_node in range(16):
            # HBM-node-to-itself copies in the all-nodes configuration
            # use 512 MiB elements instead of 1 GiB
            size = "512mib" if node_config == "allnodes" and src_node == dst_node and src_node >= 8 else "1gib"
            file = os.path.join(folder_path, f"copy-n{src_node}ton{dst_node}-{size}-{node_config}-1e.json")
            process_file_to_dataset(file, src_node, dst_node)

    df = pd.DataFrame(data)
    data.clear()
    df.set_index(index, inplace=True)
    data_pivot = df.pivot_table(index=y_label, columns=x_label, values=v_label)

    plot_heatmap(data_pivot, title, node_config)

    return data_pivot


if __name__ == "__main__":
    dall = main("allnodes", title_allnodes)
    dsmart = main("smart", title_smartnodes)
    ddiff = dall - dsmart
    plot_heatmap(ddiff, title_difference, "diff")
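

# For reference, a minimal sketch of the conversion that common.calc_throughput
# is assumed to perform, based on its usage above: `size` is taken to be the
# element size in bytes and `time` the normalized duration in nanoseconds per
# element, yielding GiB/s. This is an assumption about common.py, not its
# actual implementation; the name below is hypothetical and deliberately
# different so it does not shadow the imported function.
def _calc_throughput_sketch(size_bytes, time_ns):
    # convert bytes to GiB and nanoseconds to seconds, then divide
    return (size_bytes / 1024**3) / (time_ns / 1e9)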