
finish the move to save entire results instead of a condensed average in the plotter scripts

master
Constantin Fürst, 1 year ago
commit c27514890e
  1. benchmarks/benchmark-plotters/plot-cost-mtsubmit.py (70 changes)
  2. benchmarks/benchmark-plotters/plot-perf-enginelocation.py (74 changes)
  3. benchmarks/benchmark-plotters/plot-perf-mtsubmit.py (90 changes)
  4. benchmarks/benchmark-plotters/plot-perf-submitmethod.py (15 changes)
  5. benchmarks/benchmark-results/plot-perf-submitmethod.png (BIN)
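
The pattern these diffs move to, in a minimal sketch (the `runs_ns` dict and its timing values are made up for illustration; the column labels, the nanosecond conversion, and the barplot call with errorbar="sd" follow the updated scripts): every recorded run becomes its own row instead of being condensed into one averaged value, so seaborn derives the mean and the standard-deviation error bars from the full data.

import pandas as pd
import seaborn as sns

# Hypothetical per-run copy times in nanoseconds, one list per configuration;
# the real scripts read these from the benchmark JSON files.
runs_ns = {
    "1 Thread": [410_000, 395_000, 402_000],
    "2 Threads": [220_000, 205_000, 212_000],
}

def calc_throughput(size_bytes, time_nanoseconds):
    # Same conversion as the updated plotters: bytes and nanoseconds to GiB/s.
    return (size_bytes / (1024 ** 3)) / (time_nanoseconds * 1e-9)

data = []
for thread_count, times in runs_ns.items():
    for run_idx, t in enumerate(times):
        # One record per run instead of one pre-averaged value per configuration.
        data.append({
            "Run ID": run_idx,
            "Thread Count": thread_count,
            "Throughput in GiB/s": calc_throughput(1024 * 1024, t),
        })

df = pd.DataFrame(data)
# seaborn computes the mean bar and the standard-deviation error bar itself.
sns.barplot(x="Thread Count", y="Throughput in GiB/s", data=df, errorbar="sd")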

benchmarks/benchmark-plotters/plot-cost-mtsubmit.py (70 changes)

@@ -1,28 +1,26 @@
 import os
 import json
 import pandas as pd
 from pandas.core.ops import methods
 from itertools import chain
 import seaborn as sns
 import matplotlib.pyplot as plt
 
-x_label = "Copy Type"
+runid = "Run ID"
+x_label = "Thread Count"
 y_label = "Throughput in GiB/s"
 var_label = "Thread Counts"
 thread_counts = ["1t", "2t", "4t", "8t", "12t"]
 thread_counts_nice = ["1 Thread", "2 Threads", "4 Threads", "8 Threads", "12 Threads"]
 engine_counts = ["1e", "4e"]
 engine_counts_nice = ["1 Engine per Group", "4 Engines per Group"]
-title = "Performance of Multi-Threaded Submit - Copy Operation Intra-Node on DDR with Size 1 MiB"
+title = "Throughput per Thread - Copy Operation Intra-Node on DDR with Size 1 MiB"
 
-data = {
-    x_label : thread_counts_nice,
-    engine_counts_nice[0] : [],
-    engine_counts_nice[1] : [],
-}
+index = [runid, x_label, var_label]
+data = []
 
 def calc_throughput(size_bytes,time_microseconds):
-    time_seconds = time_microseconds * 1e-6
+    time_seconds = time_microseconds * 1e-9
     size_gib = size_bytes / (1024 ** 3)
     throughput_gibs = size_gib / time_seconds
     return throughput_gibs
@@ -34,55 +32,59 @@ def index_from_element(value,array):
     return 0
 
 # Function to load and process the JSON file for the multi-threaded benchmark
-def load_and_process_mt_json(file_path):
+def load_and_process_copy_json(file_path):
     with open(file_path, 'r') as file:
         data = json.load(file)
 
     # Extracting count from JSON structure
     count = data["count"]
 
-    # Extracting time from JSON structure for elements 0 to count
-    times = [data["list"][i]["report"]["time"]["combined_avg"] for i in range(count)]
-
-    # Calculating the average of times
-    average_time = sum(times) / count
-
-    return average_time
+    return {
+        "combined" : list(chain(*[data["list"][i]["report"]["time"]["combined"] for i in range(count)])),
+        "submission" : list(chain(*[data["list"][i]["report"]["time"]["submission"] for i in range(count)])),
+        "completion" : list(chain(*[data["list"][i]["report"]["time"]["completion"] for i in range(count)]))
+    }
 
 # Function to plot the graph for the new benchmark
-def plot_mt_graph(file_paths, engine_label):
+def create_mtsubmit_dataset(file_paths, engine_label):
     times = []
-    for file_path in file_paths:
-        # Load and process JSON file for the new benchmark
-        time_microseconds = load_and_process_mt_json(file_path)
-        times.append(time_microseconds)
 
     engine_index = index_from_element(engine_label,engine_counts)
     engine_nice = engine_counts_nice[engine_index]
-    throughput = [calc_throughput(1024*1024, t) for t in times]
-    data[engine_nice] = throughput
+    idx = 0
+    for file_path in file_paths:
+        time = load_and_process_copy_json(file_path)
+        times.append(time["combined"])
+        idx = idx + 1
+
+    throughput = [[calc_throughput(1024*1024,time) for time in t] for t in times]
+
+    idx = 0
+    for run_set in throughput:
+        run_idx = 0
+        for run in run_set:
+            data.append({ runid : run_idx, x_label: thread_counts_nice[idx], var_label : engine_nice, y_label : throughput[idx][run_idx]})
+            run_idx = run_idx + 1
+        idx = idx + 1
 
 # Main function to iterate over files and create plots for the new benchmark
 def main():
-    folder_path = "benchmark-results/mtsubmit-bench/" # Replace with the actual path to your folder
+    folder_path = "benchmark-results/" # Replace with the actual path to your folder
 
     for engine_label in engine_counts:
         mt_file_paths = [os.path.join(folder_path, f"mtsubmit-{thread_count}-{engine_label}.json") for thread_count in thread_counts]
-        plot_mt_graph(mt_file_paths, engine_label)
+        create_mtsubmit_dataset(mt_file_paths, engine_label)
 
     df = pd.DataFrame(data)
-    dfm = pd.melt(df, id_vars=x_label, var_name=var_label, value_name=y_label)
+    df.set_index(index, inplace=True)
 
-    sns.catplot(x=x_label, y=y_label, hue=var_label, data=dfm, kind='bar', height=5, aspect=1, palette="viridis")
+    sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")
     plt.title(title)
 
     plt.savefig(os.path.join(folder_path, "plot-cost-mtsubmit.png"), bbox_inches='tight')
     plt.show()
 
 if __name__ == "__main__":
-    main()
+    main()

benchmarks/benchmark-plotters/plot-perf-enginelocation.py (74 changes)

@@ -5,6 +5,7 @@ from pandas.core.ops import methods
 import seaborn as sns
 import matplotlib.pyplot as plt
 
+runid = "Run ID"
 x_label = "Copy Type"
 y_label = "Throughput in GiB/s"
 var_label = "Configuration"
@@ -12,18 +13,13 @@ types = ["intersock-n0ton4", "internode-n0ton1"]
 types_nice = ["Inter-Socket Copy", "Inter-Node Copy"]
 copy_methods = ["dstcopy", "srccopy", "xcopy"]
 copy_methods_nice = [ "Engine on DST-Node", "Engine on SRC-Node", "Cross-Copy / Both Engines" ]
-title = "Performance of Engine Location - Copy Operation on DDR with Size 1 MiB"
+title = "Performance of Engine Location - Copy Operation on DDR with Size 1 MiB and 1 Engine per WQ"
 
-data = {
-    x_label : types_nice,
-    copy_methods_nice[0] : [],
-    copy_methods_nice[1] : [],
-    copy_methods_nice[2] : []
-}
+index = [runid, x_label, var_label]
+data = []
 
 def calc_throughput(size_bytes,time_microseconds):
-    time_seconds = time_microseconds * 1e-6
+    time_seconds = time_microseconds * 1e-9
    size_gib = size_bytes / (1024 ** 3)
    throughput_gibs = size_gib / time_seconds
    return throughput_gibs
@@ -35,57 +31,65 @@ def index_from_element(value,array):
     return 0
 
 # Function to load and process the JSON file for the new benchmark
-def load_and_process_copy_json(file_path, method_label):
+def load_and_process_copy_json(file_path,method_label):
     with open(file_path, 'r') as file:
         data = json.load(file)
 
     # Extracting time from JSON structure
     if method_label == "xcopy":
-        # For xcopy method, add times from two entries and divide by 4
-        time_entry1 = data["list"][0]["report"]["time"]["combined_avg"]
-        time_entry2 = data["list"][1]["report"]["time"]["combined_avg"]
-        time_microseconds = (time_entry1 + time_entry2) / 4
-    else:
-        # For other methods, use the time from the single entry
-        time_microseconds = data["list"][0]["report"]["time"]["combined_avg"]
-
-    return time_microseconds
+        # For xcopy method, add times from two entries and divide by 3
+        time0 = data["list"][0]["report"]["time"]
+        time1 = data["list"][1]["report"]["time"]
+        return {
+            "combined" : [sum(x) / 4 for x in zip(time0["combined"], time1["combined"])],
+            "submission" : [sum(x) / 4 for x in zip(time0["completion"], time1["completion"])],
+            "completion" : [sum(x) / 4 for x in zip(time0["submission"], time1["submission"])]
+        }
+    else:
+        return data["list"][0]["report"]["time"]
 
 # Function to plot the graph for the new benchmark
-def plot_copy_graph(file_paths, method_label):
+def create_copy_dataset(file_paths, method_label):
     times = []
-    for file_path in file_paths:
-        # Load and process JSON file for the new benchmark
-        time_microseconds = load_and_process_copy_json(file_path, method_label)
-        times.append(time_microseconds)
 
     method_index = index_from_element(method_label,copy_methods)
     method_nice = copy_methods_nice[method_index]
-    throughput = [calc_throughput(1024*1024, t) for t in times]
-    data[method_nice] = throughput
+    idx = 0
+    for file_path in file_paths:
+        time = load_and_process_copy_json(file_path,method_label)
+        times.append(time["combined"])
+        idx = idx + 1
+
+    throughput = [[calc_throughput(1024*1024,time) for time in t] for t in times]
+
+    idx = 0
+    for run_set in throughput:
+        run_idx = 0
+        for run in run_set:
+            data.append({ runid : run_idx, x_label: types_nice[idx], var_label : method_nice, y_label : throughput[idx][run_idx]})
+            run_idx = run_idx + 1
+        idx = idx + 1
 
 # Main function to iterate over files and create plots for the new benchmark
 def main():
-    folder_path = "benchmark-results/cross-copy-bench/" # Replace with the actual path to your folder
+    folder_path = "benchmark-results/"
 
     for method_label in copy_methods:
-        copy_file_paths = [os.path.join(folder_path, f"{method_label}-{type_label}-1mib-4e.json") for type_label in types]
-        plot_copy_graph(copy_file_paths, method_label)
+        copy_file_paths = [os.path.join(folder_path, f"{method_label}-{type_label}-1mib-1e.json") for type_label in types]
+        create_copy_dataset(copy_file_paths, method_label)
 
     df = pd.DataFrame(data)
-    dfm = pd.melt(df, id_vars=x_label, var_name=var_label, value_name=y_label)
+    df.set_index(index, inplace=True)
+    df = df.sort_values(y_label)
 
-    sns.catplot(x=x_label, y=y_label, hue=var_label, data=dfm, kind='bar', height=5, aspect=1, palette="viridis")
+    sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")
     plt.title(title)
 
     plt.savefig(os.path.join(folder_path, "plot-perf-enginelocation.png"), bbox_inches='tight')
     plt.show()
 
 if __name__ == "__main__":
-    main()
+    main()

benchmarks/benchmark-plotters/plot-perf-mtsubmit.py (90 changes)

@@ -0,0 +1,90 @@
+import os
+import json
+import pandas as pd
+from itertools import chain
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+runid = "Run ID"
+x_label = "Thread Count"
+y_label = "Throughput in GiB/s"
+var_label = "Thread Counts"
+thread_counts = ["1t", "2t", "4t", "8t", "12t"]
+thread_counts_nice = ["1 Thread", "2 Threads", "4 Threads", "8 Threads", "12 Threads"]
+engine_counts = ["1e", "4e"]
+engine_counts_nice = ["1 Engine per Group", "4 Engines per Group"]
+title = "Combined Throughput - Copy Operation Intra-Node on DDR with Size 1 MiB"
+
+index = [runid, x_label, var_label]
+data = []
+
+def calc_throughput(size_bytes,time_microseconds):
+    time_seconds = time_microseconds * 1e-9
+    size_gib = size_bytes / (1024 ** 3)
+    throughput_gibs = size_gib / time_seconds
+    return throughput_gibs
+
+def index_from_element(value,array):
+    for (idx,val) in enumerate(array):
+        if val == value: return idx
+    return 0
+
+def load_and_process_copy_json(file_path):
+    with open(file_path, 'r') as file:
+        data = json.load(file)
+    count = data["count"]
+    return {
+        "combined" : [x / count for x in list(chain(*[data["list"][i]["report"]["time"]["combined"] for i in range(count)]))],
+        "submission" : [x / count for x in list(chain(*[data["list"][i]["report"]["time"]["submission"] for i in range(count)]))],
+        "completion" : [x / count for x in list(chain(*[data["list"][i]["report"]["time"]["completion"] for i in range(count)]))]
+    }
+
+# Function to plot the graph for the new benchmark
+def create_mtsubmit_dataset(file_paths, engine_label):
+    times = []
+    engine_index = index_from_element(engine_label,engine_counts)
+    engine_nice = engine_counts_nice[engine_index]
+    idx = 0
+    for file_path in file_paths:
+        time = load_and_process_copy_json(file_path)
+        times.append(time["combined"])
+        idx = idx + 1
+    throughput = [[calc_throughput(1024*1024,time) for time in t] for t in times]
+    idx = 0
+    for run_set in throughput:
+        run_idx = 0
+        for run in run_set:
+            data.append({ runid : run_idx, x_label: thread_counts_nice[idx], var_label : engine_nice, y_label : throughput[idx][run_idx]})
+            run_idx = run_idx + 1
+        idx = idx + 1
+
+# Main function to iterate over files and create plots for the new benchmark
+def main():
+    folder_path = "benchmark-results/" # Replace with the actual path to your folder
+    for engine_label in engine_counts:
+        mt_file_paths = [os.path.join(folder_path, f"mtsubmit-{thread_count}-{engine_label}.json") for thread_count in thread_counts]
+        create_mtsubmit_dataset(mt_file_paths, engine_label)
+    df = pd.DataFrame(data)
+    df.set_index(index, inplace=True)
+    sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")
+    plt.title(title)
+    plt.savefig(os.path.join(folder_path, "plot-perf-mtsubmit.png"), bbox_inches='tight')
+    plt.show()
+
+if __name__ == "__main__":
+    main()

benchmarks/benchmark-plotters/plot-perf-submitmethod.py (15 changes)

@@ -20,7 +20,7 @@ index = [runid, x_label, var_label]
 data = []
 
 def calc_throughput(size_bytes,time_microseconds):
-    time_seconds = time_microseconds * 1e-6
+    time_seconds = time_microseconds * 1e-9
     size_gib = size_bytes / (1024 ** 3)
     throughput_gibs = size_gib / time_seconds
     return throughput_gibs
@@ -35,16 +35,11 @@ def index_from_element(value,array):
 def load_and_process_submit_json(file_path):
     with open(file_path, 'r') as file:
         data = json.load(file)
 
-    time = {
-        "combined" : data["list"][0]["report"]["time"]["combined"],
-        "submit" : data["list"][0]["report"]["time"]["submission"],
-        "complete" : data["list"][0]["report"]["time"]["completion"]
-    }
-    return time
+    return data["list"][0]["report"]["time"]
 
 # Function to plot the graph for the new benchmark
-def plot_submit_graph(file_paths, type_label):
+def create_submit_dataset(file_paths, type_label):
     times = []
 
     type_index = index_from_element(type_label,types)
@@ -68,7 +63,7 @@ def plot_submit_graph(file_paths, type_label):
     times[2] = [t / (1024) for t in times[2]]
     times[3] = [t / (32*1024) for t in times[3]]
 
-    throughput = [[calc_throughput(1000*1000,time) for time in t] for t in times]
+    throughput = [[calc_throughput(1024,time) for time in t] for t in times]
 
     idx = 0
     for run_set in throughput:
@@ -85,7 +80,7 @@ def main():
     for type_label in types:
         file_paths = [os.path.join(folder_path, f"submit-{type_label}-{size}-1e.json") for size in sizes]
-        plot_submit_graph(file_paths, type_label)
+        create_submit_dataset(file_paths, type_label)
 
     df = pd.DataFrame(data)
     df.set_index(index, inplace=True)

benchmarks/benchmark-results/plot-perf-submitmethod.png (BIN)

Before: Width 691, Height 453, Size 36 KiB
After: Width 691, Height 453, Size 36 KiB
