diff --git a/benchmarks/benchmark-plotters/plot-cost-mtsubmit.py b/benchmarks/benchmark-plotters/plot-cost-mtsubmit.py
index d568a9c..7de358f 100644
--- a/benchmarks/benchmark-plotters/plot-cost-mtsubmit.py
+++ b/benchmarks/benchmark-plotters/plot-cost-mtsubmit.py
@@ -11,9 +11,9 @@ y_label = "Throughput in GiB/s"
 var_label = "Thread Counts"
 thread_counts = ["1t", "2t", "4t", "8t", "12t"]
 thread_counts_nice = ["1 Thread", "2 Threads", "4 Threads", "8 Threads", "12 Threads"]
-engine_counts = ["1mib-1e", "1mib-4e", "1gib-1e", "1gib-4e"]
+engine_counts = ["1mib-1e_PREVENT_FROM_DISPLAYING", "1mib-4e_PREVENT_FROM_DISPLAYING", "1gib-1e", "1gib-4e"]
 engine_counts_nice = ["1 E/WQ and Tasksize 1 MiB", "4 E/WQ and Tasksize 1 MiB", "1 E/WQ and Tasksize 1 GiB", "4 E/WQ and Tasksize 1 GiB"]
-title = "Per-Thread Throughput - 120 Copy Operations split on Threads Intra-Node on DDR"
+title = "Total Throughput - 120 Copy Operations split on Threads Intra-Node on DDR"
 
 index = [runid, x_label, var_label]
 data = []
@@ -43,9 +43,9 @@ def load_time_mesurements(file_path):
 
     return {
         "total" : sum([x / (iterations * 120) for x in list(chain([data["list"][i]["report"]["time"]["total"] for i in range(count)]))]),
-        "combined" : [x / (120 / count) for x in list(chain(*[data["list"][i]["report"]["time"]["combined"] for i in range(count)]))],
-        "submission" : [x / (120 / count) for x in list(chain(*[data["list"][i]["report"]["time"]["submission"] for i in range(count)]))],
-        "completion" : [x / (120 / count) for x in list(chain(*[data["list"][i]["report"]["time"]["completion"] for i in range(count)]))]
+        "combined" : [x / 120 for x in list(chain(*[data["list"][i]["report"]["time"]["combined"] for i in range(count)]))],
+        "submission" : [x / 120 for x in list(chain(*[data["list"][i]["report"]["time"]["submission"] for i in range(count)]))],
+        "completion" : [x / 120 for x in list(chain(*[data["list"][i]["report"]["time"]["completion"] for i in range(count)]))]
     }
 
 def process_file_to_dataset(file_path, engine_label, thread_count):
@@ -55,13 +55,12 @@ def process_file_to_dataset(file_path, engine_label, thread_count):
     thread_count_nice = thread_counts_nice[threadc_index]
     data_size = 0
 
-    if engine_label in ["1gib-1e", "1gib-4e"]:
-        data_size = 1024*1024*1024
-    else:
-        data_size = 1024*1024
+    if engine_label in ["1gib-1e", "1gib-4e"]: data_size = 1024*1024*1024
+    elif engine_label in ["1mib-1e", "1mib-4e"]: data_size = 1024*1024
+    else: data_size = 0
 
     try:
-        time = [load_time_mesurements(file_path)["total"]]
+        time = load_time_mesurements(file_path)["combined"]
         run_idx = 0
         for t in time:
             data.append({ runid : run_idx, x_label: thread_count_nice, var_label : engine_nice, y_label : calc_throughput(data_size, t)})
diff --git a/benchmarks/benchmark-plotters/plot-perf-enginelocation.py b/benchmarks/benchmark-plotters/plot-perf-enginelocation.py
index 3c010f1..c878c95 100644
--- a/benchmarks/benchmark-plotters/plot-perf-enginelocation.py
+++ b/benchmarks/benchmark-plotters/plot-perf-enginelocation.py
@@ -10,7 +10,7 @@ x_label = "Copy Type"
 y_label = "Throughput in GiB/s"
 var_label = "Configuration"
 types = ["intersock-n0ton4-1mib", "internode-n0ton1-1mib", "intersock-n0ton4-1gib", "internode-n0ton1-1gib"]
-types_nice = ["Inter-Socket Copy 1MiB", "Inter-Node Copy 1MiB", "Inter-Socket Copy 1GiB", "Inter-Node Copy 1GiB"]
+types_nice = ["Inter-Socket 1MiB", "Inter-Node 1MiB", "Inter-Socket 1GiB", "Inter-Node 1GiB"]
 copy_methods = ["dstcopy", "srccopy", "xcopy", "srcoutsidercopy", "dstoutsidercopy", "sockoutsidercopy", "nodeoutsidercopy"]
 copy_methods_nice = [ "Engine on DST-Node", "Engine on SRC-Node", "Cross-Copy / Both Engines", "Engine on SRC-Socket, not SRC-Node", "Engine on DST-Socket, not DST-Node", "Engine on different Socket", "Engine on same Socket"]
 title = "Performance of Engine Location - Copy Operation on DDR with 1 Engine per WQ"
diff --git a/benchmarks/benchmark-plotters/plot-perf-submitmethod.py b/benchmarks/benchmark-plotters/plot-perf-submitmethod.py
index 481407f..4777ad8 100644
--- a/benchmarks/benchmark-plotters/plot-perf-submitmethod.py
+++ b/benchmarks/benchmark-plotters/plot-perf-submitmethod.py
@@ -8,7 +8,7 @@ import matplotlib.pyplot as plt
 
 runid = "Run ID"
 x_label = "Size of Submitted Task"
-y_label = "Throughput in GiB/s, LogScale"
+y_label = "Throughput in GiB/s"
 var_label = "Submission Type"
 sizes = ["1kib", "4kib", "1mib", "32mib"]
 sizes_nice = ["1 KiB", "4 KiB", "1 MiB", "32 MiB"]
@@ -69,10 +69,10 @@ def process_file_to_dataset(file_path, type_label,size_label):
     else:
         data_size = 0
     try:
-        time = [load_time_mesurements(file_path,type_label)["total"]]
+        time = load_time_mesurements(file_path,type_label)["combined"]
         run_idx = 0
         for t in time:
-            data.append({ runid : run_idx, x_label: type_nice, var_label : size_nice, y_label : calc_throughput(data_size, t)})
+            data.append({ runid : run_idx, x_label: size_nice, var_label : type_nice, y_label : calc_throughput(data_size, t)})
             run_idx = run_idx + 1
     except FileNotFoundError:
         return
@@ -92,6 +92,7 @@ def main():
     df = df.sort_values(y_label)
 
     sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")
+    plt.title(title)
     plt.savefig(os.path.join(folder_path, "plot-opt-submitmethod.png"), bbox_inches='tight')
     plt.show()
 
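Note on the conversion step these hunks feed into: every plotter now passes each per-operation time from the "combined" list to calc_throughput, whose definition lies outside this diff. A minimal sketch of the shape that helper is assumed to have follows; the signature and the nanosecond time unit are assumptions, not confirmed by this change.

    # Hypothetical sketch of the calc_throughput helper the plotters call.
    # Assumes data_size is in bytes and time_taken in nanoseconds, returning
    # GiB/s; both units are assumptions, as the helper is outside this diff.
    def calc_throughput(data_size, time_taken):
        return (data_size / (1024 ** 3)) / (time_taken / 1e9)

Since the switch from the single aggregated "total" value to the per-operation "combined" list, this helper runs once per measurement, so each bar aggregates many throughput samples; that is what the errorbar="sd" argument in the barplot calls summarizes.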