diff --git a/benchmarks/benchmark-plotters/plot-cost-mtsubmit.py b/benchmarks/benchmark-plotters/plot-cost-mtsubmit.py
index e1d4879..d568a9c 100644
--- a/benchmarks/benchmark-plotters/plot-cost-mtsubmit.py
+++ b/benchmarks/benchmark-plotters/plot-cost-mtsubmit.py
@@ -7,13 +7,13 @@ import matplotlib.pyplot as plt
 
 runid = "Run ID"
 x_label = "Thread Count"
-y_label = "Throughput in GiB/s LogScale"
+y_label = "Throughput in GiB/s"
 var_label = "Thread Counts"
 thread_counts = ["1t", "2t", "4t", "8t", "12t"]
 thread_counts_nice = ["1 Thread", "2 Threads", "4 Threads", "8 Threads", "12 Threads"]
 engine_counts = ["1mib-1e", "1mib-4e", "1gib-1e", "1gib-4e"]
 engine_counts_nice = ["1 E/WQ and Tasksize 1 MiB", "4 E/WQ and Tasksize 1 MiB", "1 E/WQ and Tasksize 1 GiB", "4 E/WQ and Tasksize 1 GiB"]
-title = "Per-Thread Throughput - 120 Copy Operations split on Threads Intra-Node on DDR with Size 1 MiB"
+title = "Per-Thread Throughput - 120 Copy Operations split on Threads Intra-Node on DDR"
 
 index = [runid, x_label, var_label]
 data = []
@@ -42,7 +42,7 @@ def load_time_mesurements(file_path):
     # therefore we divide the result by 120/n_threads to get the per-element speed
 
     return {
-        "total" : sum([x / (iterations * (120 / count)) for x in list(chain(*[data["list"][i]["report"]["time"]["total"] for i in range(count)]))]),
+        "total" : sum([x / (iterations * 120) for x in list(chain([data["list"][i]["report"]["time"]["total"] for i in range(count)]))]),
         "combined" : [x / (120 / count) for x in list(chain(*[data["list"][i]["report"]["time"]["combined"] for i in range(count)]))],
         "submission" : [x / (120 / count) for x in list(chain(*[data["list"][i]["report"]["time"]["submission"] for i in range(count)]))],
         "completion" : [x / (120 / count) for x in list(chain(*[data["list"][i]["report"]["time"]["completion"] for i in range(count)]))]
@@ -61,7 +61,7 @@ def process_file_to_dataset(file_path, engine_label, thread_count):
     data_size = 1024*1024
 
     try:
-        time = load_time_mesurements(file_path)["total"]
+        time = [load_time_mesurements(file_path)["total"]]
         run_idx = 0
         for t in time:
             data.append({ runid : run_idx, x_label: thread_count_nice, var_label : engine_nice, y_label : calc_throughput(data_size, t)})
diff --git a/benchmarks/benchmark-plotters/plot-perf-enginelocation.py b/benchmarks/benchmark-plotters/plot-perf-enginelocation.py
index 2111cd7..3c010f1 100644
--- a/benchmarks/benchmark-plotters/plot-perf-enginelocation.py
+++ b/benchmarks/benchmark-plotters/plot-perf-enginelocation.py
@@ -12,7 +12,7 @@ var_label = "Configuration"
 types = ["intersock-n0ton4-1mib", "internode-n0ton1-1mib", "intersock-n0ton4-1gib", "internode-n0ton1-1gib"]
 types_nice = ["Inter-Socket Copy 1MiB", "Inter-Node Copy 1MiB", "Inter-Socket Copy 1GiB", "Inter-Node Copy 1GiB"]
 copy_methods = ["dstcopy", "srccopy", "xcopy", "srcoutsidercopy", "dstoutsidercopy", "sockoutsidercopy", "nodeoutsidercopy"]
-copy_methods_nice = [ "Engine on DST-Node", "Engine on SRC-Node", "Cross-Copy / Both Engines", "Engine on SRC-Socket, not SRC-Node", "Engine on DST-Socket, not DST-Node", "Engine on different Socket", "Engine on same Socket but neither SRC nor DST Node"]
+copy_methods_nice = [ "Engine on DST-Node", "Engine on SRC-Node", "Cross-Copy / Both Engines", "Engine on SRC-Socket, not SRC-Node", "Engine on DST-Socket, not DST-Node", "Engine on different Socket", "Engine on same Socket"]
 title = "Performance of Engine Location - Copy Operation on DDR with 1 Engine per WQ"
 
 index = [runid, x_label, var_label]
@@ -45,14 +45,19 @@ def load_time_mesurements(file_path,method_label):
         time1 = data["list"][1]["report"]["time"]
 
         return {
-        "total": max(time0["total"],time1["total"]),
+        "total": max(time0["total"],time1["total"]) / iterations,
         "combined" : [max(x,y) for x,y in zip(time0["combined"], time1["combined"])],
         "submission" : [max(x,y) for x,y in zip(time0["completion"], time1["completion"])],
-        "completion" : [max(x,y) for x,y in zip(time0["submission"], time1["submission"])]
+        "completion" : [max(x,y) for x,y in zip(time0["completion"], time1["completion"])],
         }
     else:
-        return data["list"][0]["report"]["time"]
+        return {
+            "total": data["list"][0]["report"]["time"]["total"] / iterations,
+            "combined": data["list"][0]["report"]["time"]["combined"],
+            "submission": data["list"][0]["report"]["time"]["submission"],
+            "completion": data["list"][0]["report"]["time"]["completion"]
+        }
 
 
 def create_copy_dataset(file_path, method_label, type_label):
     method_index = index_from_element(method_label,copy_methods)
@@ -61,14 +66,14 @@ def create_copy_dataset(file_path, method_label, type_label):
     type_nice = types_nice[type_index]
     data_size = 0
 
-    if type_label in ["internode-n0ton1-1mib", "intersock-n0ton4-1mib"]:
-        data_size = 1024 * 1024
-    else:
-        data_size = 1024*1024*1024
+
+    if type_label in ["internode-n0ton1-1gib", "intersock-n0ton4-1gib"]: data_size = 1024*1024*1024
+    elif type_label in ["internode-n0ton1-1mib", "intersock-n0ton4-1mib"]: data_size = 1024 * 1024
+    else: data_size = 0
 
     try:
-        time = load_time_mesurements(file_path,method_label)["total"]
         run_idx = 0
+        time = [load_time_mesurements(file_path,method_label)["total"]]
         for t in time:
             data.append({ runid : run_idx, x_label: type_nice, var_label : method_nice, y_label : calc_throughput(data_size, t)})
             run_idx = run_idx + 1
diff --git a/benchmarks/benchmark-plotters/plot-perf-peakthroughput.py b/benchmarks/benchmark-plotters/plot-perf-peakthroughput.py
index fc65159..c8a87d4 100644
--- a/benchmarks/benchmark-plotters/plot-perf-peakthroughput.py
+++ b/benchmarks/benchmark-plotters/plot-perf-peakthroughput.py
@@ -12,6 +12,7 @@ y_label = "Source Node"
 v_label = "Throughput"
 title = "Copy Throughput for 1GiB Elements running on SRC Node"
 
+index = [ runid, x_label, y_label]
 data = []
 
 
@@ -38,10 +39,10 @@ def load_time_mesurements(file_path):
     iterations = data["list"][0]["task"]["iterations"]
 
     return {
-        "total": data["list"][0]["report"]["total"] / iterations,
-        "combined": data["list"][0]["report"]["combined"],
-        "submission": data["list"][0]["report"]["submission"],
-        "completion": data["list"][0]["report"]["completion"]
+        "total": data["list"][0]["report"]["time"]["total"] / iterations,
+        "combined": data["list"][0]["report"]["time"]["combined"],
+        "submission": data["list"][0]["report"]["time"]["submission"],
+        "completion": data["list"][0]["report"]["time"]["completion"]
     }
 
 
@@ -49,7 +50,7 @@ def process_file_to_dataset(file_path, src_node, dst_node):
     data_size = 1024*1024*1024
 
     try:
-        time = load_time_mesurements(file_path)["total"]
+        time = [load_time_mesurements(file_path)["total"]]
         run_idx = 0
         for t in time:
             data.append({ runid : run_idx, x_label : dst_node, y_label : src_node, v_label: calc_throughput(data_size, t)})
@@ -67,9 +68,10 @@ def main():
             process_file_to_dataset(file, src_node, dst_node)
 
     df = pd.DataFrame(data)
-    data_pivot = df.pivot_table(index=y_label, columns=x_label, values=v_label, aggfunc=mean_without_outliers)
+    df.set_index(index, inplace=True)
+    data_pivot = df.pivot_table(index=y_label, columns=x_label, values=v_label)
 
-    sns.heatmap(data_pivot, annot=True, palette="rocket", fmt=".0f")
+    sns.heatmap(data_pivot, annot=True, cmap="rocket_r", fmt=".0f")
 
     plt.title(title)
     plt.savefig(os.path.join(folder_path, "plot-perf-peakthroughput.png"), bbox_inches='tight')
diff --git a/benchmarks/benchmark-plotters/plot-perf-submitmethod.py b/benchmarks/benchmark-plotters/plot-perf-submitmethod.py
index 22a5cc2..481407f 100644
--- a/benchmarks/benchmark-plotters/plot-perf-submitmethod.py
+++ b/benchmarks/benchmark-plotters/plot-perf-submitmethod.py
@@ -47,10 +47,10 @@ def load_time_mesurements(file_path,type_label):
     else:
         divisor = 1
 
     return {
-        "total": data["list"][0]["report"]["total"] / (iterations * divisor),
-        "combined": [ x / divisor for x in data["list"][0]["report"]["combined"]],
-        "submission": [ x / divisor for x in data["list"][0]["report"]["submission"]],
-        "completion": [ x / divisor for x in data["list"][0]["report"]["completion"]]
+        "total": data["list"][0]["report"]["time"]["total"] / (iterations * divisor),
+        "combined": [ x / divisor for x in data["list"][0]["report"]["time"]["combined"]],
+        "submission": [ x / divisor for x in data["list"][0]["report"]["time"]["submission"]],
+        "completion": [ x / divisor for x in data["list"][0]["report"]["time"]["completion"]]
     }
 
 
@@ -69,7 +69,7 @@ def process_file_to_dataset(file_path, type_label,size_label):
     else:
         data_size = 0
     try:
-        time = load_time_mesurements(file_path,type_label)["total"]
+        time = [load_time_mesurements(file_path,type_label)["total"]]
         run_idx = 0
         for t in time:
             data.append({ runid : run_idx, x_label: type_nice, var_label : size_nice, y_label : calc_throughput(data_size, t)})
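
Note: all four plotters now read their timings from the nested "report" -> "time" object of the benchmark result JSON and divide the "total" value by the iteration count. A minimal sketch of a reader that follows this layout is given below; only the key paths visible in the diff are taken from the source, the file name and function name are illustrative.

import json

def read_total_per_iteration(file_path):
    # Load one benchmark result file (layout assumed from the key paths in the diff above).
    with open(file_path, 'r') as file:
        data = json.load(file)
    # Iteration count of the first task in the result list.
    iterations = data["list"][0]["task"]["iterations"]
    # Timings live under report -> time; "total" is normalized to a per-iteration value.
    return data["list"][0]["report"]["time"]["total"] / iterations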