Streamline the plotters: all now use the file loop in main and a function that processes one file into the dataset; also adds the peakthroughput plotter and removes the defunct opt-submitmethod plotter
master
Constantin Fürst
1 year ago
5 changed files with 155 additions and 178 deletions

benchmarks/benchmark-plotters/plot-cost-mtsubmit.py (47 changes)
benchmarks/benchmark-plotters/plot-opt-submitmethod.py (104 changes)
benchmarks/benchmark-plotters/plot-perf-enginelocation.py (16 changes)
benchmarks/benchmark-plotters/plot-perf-peakthroughput.py (80 changes)
benchmarks/benchmark-plotters/plot-perf-submitmethod.py (86 changes)
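Per the commit message, every plotter now shares the same skeleton: main owns the file loop and a single function folds one result file into the module-level dataset. A minimal sketch of that shared pattern, with hypothetical file names, labels, and JSON fields standing in for the real ones (which appear in the diffs below):

import os
import json

data = []  # module-level dataset, appended to one file at a time


def process_file_to_dataset(file_path, label):
    # Fold a single benchmark result file into the shared dataset;
    # missing files are skipped so incomplete result folders still plot.
    try:
        with open(file_path, "r") as file:
            result = json.load(file)
        data.append({"label": label, "total": result["list"][0]["report"]["total"]})
    except FileNotFoundError:
        return


def main():
    folder_path = "benchmark-results/"
    for label in ["bs10", "ssaw"]:  # hypothetical run labels
        process_file_to_dataset(os.path.join(folder_path, f"{label}.json"), label)
    # ...build the DataFrame from `data` and plot here...


if __name__ == "__main__":
    main()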
benchmarks/benchmark-plotters/plot-opt-submitmethod.py (deleted)
@@ -1,104 +0,0 @@
import os
import json
import pandas as pd
from pandas.core.ops import methods
from typing import List
import seaborn as sns
import matplotlib.pyplot as plt

runid = "Run ID"
x_label = "Size of Submitted Task"
y_label = "Throughput in GiB/s, LogScale"
var_label = "Submission Type"
sizes = ["1kib", "4kib", "1mib", "32mib"]
sizes_nice = ["1 KiB", "4 KiB", "1 MiB", "32 MiB"]
types = ["bs10", "bs50", "ms10", "ms50", "ssaw"]
types_nice = ["Batch, Size 10", "Batch, Size 50", "Multi-Submit, Count 10", "Multi-Submit, Count 50", "Single Submit"]
title = "Optimal Submission Method - Copy Operation tested Intra-Node on DDR"

index = [runid, x_label, var_label]
data = []


def calc_throughput(size_bytes, time_microseconds):
    time_seconds = time_microseconds * 1e-9
    size_gib = size_bytes / (1024 ** 3)
    throughput_gibs = size_gib / time_seconds
    return throughput_gibs


def index_from_element(value, array):
    for (idx, val) in enumerate(array):
        if val == value: return idx
    return 0


def load_and_process_submit_json(file_path):
    with open(file_path, 'r') as file:
        data = json.load(file)
        iterations = data["list"][0]["task"]["iterations"]

        return {
            "total": data["list"][0]["report"]["total"] / iterations,
            "combined": data["list"][0]["report"]["combined"],
            "submission": data["list"][0]["report"]["submission"],
            "completion": data["list"][0]["report"]["completion"]
        }


# Function to plot the graph for the new benchmark
def create_submit_dataset(file_paths, type_label):
    times = []

    type_index = index_from_element(type_label, types)
    type_nice = types_nice[type_index]

    idx = 0
    for file_path in file_paths:
        time = load_and_process_submit_json(file_path)
        times.append(time["total"])
        idx = idx + 1

    # Adjust time measurements based on type
    # which can contain multiple submissions
    if type_label in {"bs10", "ms10"}:
        times = [[t / 10 for t in time] for time in times]
    elif type_label in {"ms50", "bs50"}:
        times = [[t / 50 for t in time] for time in times]

    times[0] = [t / 1 for t in times[0]]
    times[1] = [t / 4 for t in times[1]]
    times[2] = [t / (1024) for t in times[2]]
    times[3] = [t / (32*1024) for t in times[3]]

    throughput = [[calc_throughput(1024, time) for time in t] for t in times]

    idx = 0
    for run_set in throughput:
        run_idx = 0
        for run in run_set:
            data.append({runid: run_idx, x_label: sizes_nice[idx], var_label: type_nice, y_label: throughput[idx][run_idx]})
            run_idx = run_idx + 1
        idx = idx + 1


# Main function to iterate over files and create plots for the new benchmark
def main():
    folder_path = "benchmark-results/"  # Replace with the actual path to your folder

    for type_label in types:
        file_paths = [os.path.join(folder_path, f"submit-{type_label}-{size}-1e.json") for size in sizes]
        create_submit_dataset(file_paths, type_label)

    df = pd.DataFrame(data)
    df.set_index(index, inplace=True)
    df = df.sort_values(y_label)

    ax = sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")
    ax.set(yscale="log")
    sns.move_legend(ax, "lower right")
    plt.title(title)
    plt.savefig(os.path.join(folder_path, "plot-opt-submitmethod.png"), bbox_inches='tight')
    plt.show()


if __name__ == "__main__":
    main()
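A note on the normalization in create_submit_dataset above: batch and multi-submit measurements cover 10 or 50 submissions, so their times are first divided down to a single submission; each size's times are then scaled to time-per-KiB (4 KiB by 4, 1 MiB by 1024, 32 MiB by 32*1024), which is why calc_throughput is always called with size_bytes = 1024 and yields comparable GiB/s values across sizes. Also, despite the time_microseconds parameter name, the 1e-9 conversion factor treats the input as nanoseconds; the new plotter below renames the parameter to time_ns accordingly.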
benchmarks/benchmark-plotters/plot-perf-peakthroughput.py (new file)
@@ -0,0 +1,80 @@
import os
import json
import pandas as pd
from pandas.core.ops import methods
from typing import List
import seaborn as sns
import matplotlib.pyplot as plt

runid = "Run ID"
x_label = "Destination Node"
y_label = "Source Node"
v_label = "Throughput"
title = "Copy Throughput for 1GiB Elements running on SRC Node"

data = []


def mean_without_outliers(x):
    # drop the two lowest and two highest runs before averaging
    return x.sort_values()[2:-2].mean()


def calc_throughput(size_bytes, time_ns):
    time_seconds = time_ns * 1e-9
    size_gib = size_bytes / (1024 ** 3)
    throughput_gibs = size_gib / time_seconds
    return throughput_gibs


def index_from_element(value, array):
    for (idx, val) in enumerate(array):
        if val == value: return idx
    return 0


def load_time_mesurements(file_path):
    with open(file_path, 'r') as file:
        data = json.load(file)
        iterations = data["list"][0]["task"]["iterations"]

        return {
            "total": data["list"][0]["report"]["total"] / iterations,
            "combined": data["list"][0]["report"]["combined"],
            "submission": data["list"][0]["report"]["submission"],
            "completion": data["list"][0]["report"]["completion"]
        }


def process_file_to_dataset(file_path, src_node, dst_node):
    data_size = 1024 * 1024 * 1024

    try:
        time = load_time_mesurements(file_path)["total"]
        run_idx = 0
        for t in time:
            data.append({runid: run_idx, x_label: dst_node, y_label: src_node, v_label: calc_throughput(data_size, t)})
            run_idx = run_idx + 1
    except FileNotFoundError:
        return


def main():
    folder_path = "benchmark-results/"

    for src_node in range(16):
        for dst_node in range(16):
            file = os.path.join(folder_path, f"copy-n{src_node}ton{dst_node}-1gib-1e.json")
            process_file_to_dataset(file, src_node, dst_node)

    df = pd.DataFrame(data)
    data_pivot = df.pivot_table(index=y_label, columns=x_label, values=v_label, aggfunc=mean_without_outliers)

    sns.heatmap(data_pivot, annot=True, cmap="rocket", fmt=".0f")

    plt.title(title)
    plt.savefig(os.path.join(folder_path, "plot-perf-peakthroughput.png"), bbox_inches='tight')
    plt.show()


if __name__ == "__main__":
    main()
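As a quick sanity check of calc_throughput as defined in the new plotter (timings in nanoseconds, condensed to one line here): copying 1 GiB in half a second should report 2 GiB/s.

def calc_throughput(size_bytes, time_ns):
    # same arithmetic as in plot-perf-peakthroughput.py
    return (size_bytes / (1024 ** 3)) / (time_ns * 1e-9)

assert abs(calc_throughput(1024 ** 3, 0.5e9) - 2.0) < 1e-9  # 1 GiB in 0.5 s -> 2 GiB/s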