
remove the mtsubmit performance plotter, which was based on a false assumption; modify the mtsubmit cost plotter for the new task set size

master
Constantin Fürst, 1 year ago
commit 59853ffc92
  1. benchmarks/benchmark-plotters/plot-cost-mtsubmit.py (6 lines changed)
  2. benchmarks/benchmark-plotters/plot-perf-mtsubmit.py (90 lines removed)

benchmarks/benchmark-plotters/plot-cost-mtsubmit.py (6 lines changed)

@@ -13,7 +13,7 @@ thread_counts = ["1t", "2t", "4t", "8t", "12t"]
 thread_counts_nice = ["1 Thread", "2 Threads", "4 Threads", "8 Threads", "12 Threads"]
 engine_counts = ["1e", "4e"]
 engine_counts_nice = ["1 Engine per Group", "4 Engines per Group"]
-title = "Throughput per Thread - Copy Operation Intra-Node on DDR with Size 1 MiB"
+title = "Combined Throughput - Copy Operation Intra-Node on DDR with Size 1 MiB"
 index = [runid, x_label, var_label]
 data = []
@@ -57,7 +57,7 @@ def create_mtsubmit_dataset(file_paths, engine_label):
         times.append(time["combined"])
         idx = idx + 1
-    throughput = [[calc_throughput(1024*1024,time) for time in t] for t in times]
+    throughput = [[calc_throughput(10*1024*1024,time) for time in t] for t in times]
     idx = 0
     for run_set in throughput:
@@ -82,7 +82,7 @@ def main():
     sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")
     plt.title(title)
-    plt.savefig(os.path.join(folder_path, "plot-cost-mtsubmit.png"), bbox_inches='tight')
+    plt.savefig(os.path.join(folder_path, "plot-perf-mtsubmit.png"), bbox_inches='tight')
     plt.show()
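
The functional change is the second hunk: calc_throughput (reproduced in the deleted file below) divides a size in GiB by a time in seconds, so replacing 1024*1024 with 10*1024*1024 rebases the reported throughput on the new 10 MiB task set instead of 1 MiB. A minimal sketch of that arithmetic with a made-up timing value; note that the parameter is named time_microseconds while the 1e-9 factor actually converts nanoseconds:

def calc_throughput(size_bytes, time_microseconds):
    # conversion as written in the plotters: 1e-9 is a nanosecond
    # factor, despite the parameter name suggesting microseconds
    time_seconds = time_microseconds * 1e-9
    size_gib = size_bytes / (1024 ** 3)
    return size_gib / time_seconds

# hypothetical reading: a 10 MiB task set finishing in 1e6 ns (1 ms)
# comes out at (10/1024) GiB / 1e-3 s, roughly 9.77 GiB/s
print(calc_throughput(10 * 1024 * 1024, 1e6))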

benchmarks/benchmark-plotters/plot-perf-mtsubmit.py (90 lines removed)

@@ -1,90 +0,0 @@
-import os
-import json
-import pandas as pd
-from itertools import chain
-import seaborn as sns
-import matplotlib.pyplot as plt
-
-runid = "Run ID"
-x_label = "Thread Count"
-y_label = "Throughput in GiB/s"
-var_label = "Thread Counts"
-
-thread_counts = ["1t", "2t", "4t", "8t", "12t"]
-thread_counts_nice = ["1 Thread", "2 Threads", "4 Threads", "8 Threads", "12 Threads"]
-engine_counts = ["1e", "4e"]
-engine_counts_nice = ["1 Engine per Group", "4 Engines per Group"]
-
-title = "Combined Throughput - Copy Operation Intra-Node on DDR with Size 1 MiB"
-
-index = [runid, x_label, var_label]
-data = []
-
-def calc_throughput(size_bytes,time_microseconds):
-    time_seconds = time_microseconds * 1e-9
-    size_gib = size_bytes / (1024 ** 3)
-    throughput_gibs = size_gib / time_seconds
-    return throughput_gibs
-
-def index_from_element(value,array):
-    for (idx,val) in enumerate(array):
-        if val == value: return idx
-    return 0
-
-def load_and_process_copy_json(file_path):
-    with open(file_path, 'r') as file:
-        data = json.load(file)
-    count = data["count"]
-    return {
-        "combined" : [x / count for x in list(chain(*[data["list"][i]["report"]["time"]["combined"] for i in range(count)]))],
-        "submission" : [x / count for x in list(chain(*[data["list"][i]["report"]["time"]["submission"] for i in range(count)]))],
-        "completion" : [x / count for x in list(chain(*[data["list"][i]["report"]["time"]["completion"] for i in range(count)]))]
-    }
-
-# Function to plot the graph for the new benchmark
-def create_mtsubmit_dataset(file_paths, engine_label):
-    times = []
-    engine_index = index_from_element(engine_label,engine_counts)
-    engine_nice = engine_counts_nice[engine_index]
-    idx = 0
-    for file_path in file_paths:
-        time = load_and_process_copy_json(file_path)
-        times.append(time["combined"])
-        idx = idx + 1
-    throughput = [[calc_throughput(1024*1024,time) for time in t] for t in times]
-    idx = 0
-    for run_set in throughput:
-        run_idx = 0
-        for run in run_set:
-            data.append({ runid : run_idx, x_label: thread_counts_nice[idx], var_label : engine_nice, y_label : throughput[idx][run_idx]})
-            run_idx = run_idx + 1
-        idx = idx + 1
-
-# Main function to iterate over files and create plots for the new benchmark
-def main():
-    folder_path = "benchmark-results/" # Replace with the actual path to your folder
-    for engine_label in engine_counts:
-        mt_file_paths = [os.path.join(folder_path, f"mtsubmit-{thread_count}-{engine_label}.json") for thread_count in thread_counts]
-        create_mtsubmit_dataset(mt_file_paths, engine_label)
-    df = pd.DataFrame(data)
-    df.set_index(index, inplace=True)
-    sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")
-    plt.title(title)
-    plt.savefig(os.path.join(folder_path, "plot-perf-mtsubmit.png"), bbox_inches='tight')
-    plt.show()
-
-if __name__ == "__main__":
-    main()
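
For reference, load_and_process_copy_json above implies the layout of the mtsubmit-*.json result files. The sketch below reconstructs that layout purely from the field accesses in the deleted code; the structure is an inference, and the timing values are placeholders, not measured results:

from itertools import chain

# hypothetical result-file contents, matching the accesses data["count"]
# and data["list"][i]["report"]["time"][...] in load_and_process_copy_json
sample = {
    "count": 2,
    "list": [
        {"report": {"time": {"combined": [1000, 1010],
                             "submission": [100, 110],
                             "completion": [900, 900]}}},
        {"report": {"time": {"combined": [980, 1020],
                             "submission": [90, 120],
                             "completion": [890, 900]}}},
    ],
}

# the same flatten-and-scale step the plotter applies per category:
# concatenate the per-task lists, then divide each entry by the count
count = sample["count"]
combined = [x / count for x in chain(*[e["report"]["time"]["combined"]
                                       for e in sample["list"]])]
print(combined)  # [500.0, 505.0, 490.0, 510.0]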