prepare submitmethod benchmark plotter for the new result type

Constantin Fürst · 1 year ago · master
commit 167370dbf9
1 changed file: benchmarks/benchmark-plotters/plot-perf-submitmethod.py (72 lines changed)

@@ -6,25 +6,18 @@ from typing import List
 import seaborn as sns
 import matplotlib.pyplot as plt
 
+runid = "Run ID"
 x_label = "Size of Submitted Task"
 y_label = "Throughput in GiB/s"
 var_label = "Submission Type"
 sizes = ["1kib", "4kib", "1mib", "1gib"]
 sizes_nice = ["1 KiB", "4 KiB", "1 MiB", "1 GiB"]
 types = ["bs10", "bs50", "ms10", "ms50", "ssaw"]
-types_nice = ["Batch, Size 10", "Batch, Size 50", "Multi-Submit, Count 10", "Multi Submit, Count 50", "Single Submit"]
+types_nice = ["Batch, Size 10", "Batch, Size 50", "Multi-Submit, Count 10", "Multi-Submit, Count 50", "Single Submit"]
 title = "Performance of Submission Methods - Copy Operation tested Intra-Node on DDR"
 
-data = {
-    x_label : sizes_nice,
-    types_nice[0] : [],
-    types_nice[1] : [],
-    types_nice[2] : [],
-    types_nice[3] : [],
-    types_nice[4] : []
-}
-stdev = {}
+index = [runid, x_label, var_label]
+data = []
 
 def calc_throughput(size_bytes,time_microseconds):
     time_seconds = time_microseconds * 1e-6
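
The structural change in this hunk: the wide table (one pre-averaged column per submission type) becomes a flat list of per-run records keyed by Run ID, task size, and submission type. A minimal sketch of the new layout, with invented throughput values purely for illustration:

import pandas as pd

runid = "Run ID"
x_label = "Size of Submitted Task"
var_label = "Submission Type"
y_label = "Throughput in GiB/s"

# One record per individual benchmark run; the numbers below are
# invented for illustration, not actual measurements.
data = [
    {runid: 0, x_label: "1 KiB", var_label: "Single Submit", y_label: 0.82},
    {runid: 1, x_label: "1 KiB", var_label: "Single Submit", y_label: 0.85},
    {runid: 0, x_label: "4 KiB", var_label: "Single Submit", y_label: 2.10},
    {runid: 1, x_label: "4 KiB", var_label: "Single Submit", y_label: 2.04},
]

df = pd.DataFrame(data)
df.set_index([runid, x_label, var_label], inplace=True)
print(df)

Keeping every run as its own row lets the plotting layer derive the spread across runs itself, which is what makes the hand-built stdev bookkeeping removed further down unnecessary.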
@@ -39,20 +32,11 @@ def index_from_element(value,array):
     return 0
 
-def load_and_process_submit_json(file_path,s,t):
+def load_and_process_submit_json(file_path):
     with open(file_path, 'r') as file:
         data = json.load(file)
-        time_microseconds = data["list"][0]["report"]["time"]["combined_avg"]
-        if t not in stdev: stdev[t] = dict()
-        stdev[t][s] = data["list"][0]["report"]["time"]["combined_stdev"]
-        return time_microseconds
-
-def stdev_functor(values):
-    v = values[0]
-    sd = stdev[v]
-    return (v - sd, v + sd)
+        time = { "combined" : data["list"][0]["report"]["time"]["combined"], "submit" : data["list"][0]["report"]["time"]["submit"], "complete" : data["list"][0]["report"]["time"]["complete"]}
+        return time
 
 # Function to plot the graph for the new benchmark
 def plot_submit_graph(file_paths, type_label):
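
The loader no longer reads the precomputed combined_avg and combined_stdev fields; it now returns the raw per-phase time arrays. The result files therefore presumably carry one list of measurements per phase. The following shape is an assumption reconstructed only from the keys the loader accesses, with invented values:

# Assumed layout of one result file; the schema is inferred from the
# dictionary keys read by the loader above, and all numbers are invented.
sample = {
    "list": [{
        "report": {
            "time": {
                "combined": [123.4, 125.1, 124.0],  # microseconds, one entry per run
                "submit":   [10.1, 10.4, 10.2],
                "complete": [113.3, 114.7, 113.8],
            }
        }
    }]
}

time = {
    "combined": sample["list"][0]["report"]["time"]["combined"],
    "submit": sample["list"][0]["report"]["time"]["submit"],
    "complete": sample["list"][0]["report"]["time"]["complete"],
}
assert time["combined"] == [123.4, 125.1, 124.0]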
@@ -63,49 +47,47 @@ def plot_submit_graph(file_paths, type_label):
     idx = 0
     for file_path in file_paths:
-        time_microseconds = load_and_process_submit_json(file_path,sizes_nice[idx],type_nice)
-        times.append(time_microseconds)
+        time = load_and_process_submit_json(file_path)
+        times.append(time["combined"])
         idx = idx + 1
 
     # Adjust time measurements based on type
     # which can contain multiple submissions
     if type_label in {"bs10", "ms10"}:
-        times = [time / 10 for time in times]
+        times = [[t / 10 for t in time] for time in times]
     elif type_label in {"ms50", "bs50"}:
-        times = [time / 50 for time in times]
+        times = [[t / 50 for t in time] for time in times]
 
-    times[0] = times[0] / 1
-    times[1] = times[1] / 4
-    times[2] = times[2] / 1024
-    times[3] = times[3] / (1024 * 1024)
+    times[0] = [t / 1 for t in times[0]]
+    times[1] = [t / 4 for t in times[1]]
+    times[2] = [t / (1024) for t in times[2]]
+    times[3] = [t / (1024*1024) for t in times[3]]
 
-    throughput = [calc_throughput(1024,t) for t in times]
-    data[type_nice] = throughput
+    throughput = [[calc_throughput(1024,time) for time in t] for t in times]
+
+    idx = 0
+    for run_set in throughput:
+        run_idx = 0
+        for run in run_set:
+            data.append({ runid : run_idx, x_label: sizes_nice[idx], var_label : type_nice, y_label : throughput[idx][run_idx]})
+            run_idx = run_idx + 1
+        idx = idx + 1
 
 # Main function to iterate over files and create plots for the new benchmark
 def main():
     folder_path = "benchmark-results/submit-bench/" # Replace with the actual path to your folder
 
     for type_label in types:
         file_paths = [os.path.join(folder_path, f"submit-{type_label}-{size}-1e.json") for size in sizes]
         plot_submit_graph(file_paths, type_label)
 
     df = pd.DataFrame(data)
-    dfm = pd.melt(df, id_vars=x_label, var_name=var_label, value_name=y_label)
-
-    error_values: List[float] = []
-    for index,row in dfm.iterrows():
-        s = dfm[x_label][index]
-        t = dfm[var_label][index]
-        error_values.append(stdev[t][s])
-
-    dfm["Stdev"] = error_values
-    print(dfm)
+    df.set_index(index, inplace=True)
+    print(df)
 
-    sns.catplot(x=x_label, y=y_label, hue=var_label, data=dfm, kind='bar', height=5, aspect=1, palette="viridis", errorbar=("ci", 100))
+    sns.catplot(x=x_label, y=y_label, hue=var_label, data=df, kind='bar', height=5, aspect=1, palette="viridis", errorbar="sd")
     plt.title(title)
     plt.savefig(os.path.join(folder_path, "plot-perf-submitmethod.png"), bbox_inches='tight')
     plt.show()
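
With raw per-run rows in the frame, the old melt-plus-Stdev-column dance disappears: seaborn's bar kind aggregates repeated measurements per bar on its own, and errorbar="sd" (part of the errorbar API introduced in seaborn 0.12) draws one standard deviation around each mean, replacing the hand-fed errorbar=("ci", 100). A self-contained sketch with made-up numbers:

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Invented measurements: three runs per task size for one submission type.
rows = [
    {"Run ID": r, "Size of Submitted Task": size,
     "Submission Type": "Single Submit", "Throughput in GiB/s": v}
    for size, runs in {"1 KiB": [0.80, 0.86, 0.83],
                       "4 KiB": [2.05, 2.12, 2.08]}.items()
    for r, v in enumerate(runs)
]
df = pd.DataFrame(rows)

# seaborn averages the repeated runs per bar and draws +/- one
# standard deviation as the error bar.
sns.catplot(x="Size of Submitted Task", y="Throughput in GiB/s",
            hue="Submission Type", data=df, kind="bar",
            height=5, aspect=1, palette="viridis", errorbar="sd")
plt.show()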
