add benchmark plotting scripts using seaborn to display the results; the generated plots are also added
master
Constantin Fürst
1 year ago
6 changed files with 262 additions and 0 deletions
77  benchmarks/benchmark-plotters/plot-cost-mtsubmit.py
80  benchmarks/benchmark-plotters/plot-perf-enginelocation.py
105 benchmarks/benchmark-plotters/plot-perf-submitmethod.py
BIN benchmarks/benchmark-results/cross-copy-bench/plot-perf-enginelocation.png
BIN benchmarks/benchmark-results/mtsubmit-bench/plot-cost-mtsubmit.png
BIN benchmarks/benchmark-results/submit-bench/plot-perf-submitmethod.png
@@ -0,0 +1,77 @@
import os
import json
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

x_label = "Copy Type"
y_label = "Time in Microseconds"
var_label = "Thread Counts"
thread_counts = ["1t", "2t", "4t", "8t", "12t"]
thread_counts_nice = ["1 Thread", "2 Threads", "4 Threads", "8 Threads", "12 Threads"]
engine_counts = ["1e", "4e"]
engine_counts_nice = ["1 Engine per Group", "4 Engines per Group"]

data = {
    x_label : thread_counts_nice,
    engine_counts_nice[0] : [],
    engine_counts_nice[1] : [],
}


# Return the index of value in array, or 0 if it is not present
def index_from_element(value, array):
    for (idx, val) in enumerate(array):
        if val == value: return idx
    return 0


# Load the JSON file for the multi-threaded benchmark and return the
# average of the per-task times
def load_and_process_mt_json(file_path):
    with open(file_path, 'r') as file:
        data = json.load(file)

    # Extract the task count from the JSON structure
    count = data["count"]

    # Extract the time for elements 0 to count from the JSON structure
    times = [data["list"][i]["report"]["time"]["combined_avg"] for i in range(count)]

    # Calculate the average of the times
    average_time = sum(times) / count

    return average_time


# Collect the times for one engine configuration into the data dictionary
def plot_mt_graph(file_paths, engine_label):
    times = []

    for file_path in file_paths:
        # Load and process the JSON file for this thread count
        time_microseconds = load_and_process_mt_json(file_path)
        times.append(time_microseconds)

    engine_index = index_from_element(engine_label, engine_counts)
    engine_nice = engine_counts_nice[engine_index]

    data[engine_nice] = times


# Main function: iterate over the result files and create the plot
def main():
    folder_path = "benchmark-results/mtsubmit-bench/"  # Replace with the actual path to your folder

    for engine_label in engine_counts:
        mt_file_paths = [os.path.join(folder_path, f"mtsubmit-{thread_count}-{engine_label}.json") for thread_count in thread_counts]
        plot_mt_graph(mt_file_paths, engine_label)

    df = pd.DataFrame(data)
    dfm = pd.melt(df, id_vars=x_label, var_name=var_label, value_name=y_label)

    sns.catplot(x=x_label, y=y_label, hue=var_label, data=dfm, kind='bar', height=5, aspect=1, palette="viridis")
    plt.savefig(os.path.join(folder_path, "plot-cost-mtsubmit.png"))
    plt.show()

if __name__ == "__main__":
    main()
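
For reference, a minimal sketch of the result-file layout that load_and_process_mt_json (and the loaders in the two scripts below) assumes. The field names "count", "list", "report", "time", "combined_avg" and "combined_stdev" come from the code above; the file name follows the mtsubmit-{threads}-{engines}.json scheme used in main(), while the numeric values are made up for illustration:

import json

# hypothetical result file with two per-thread entries
example_result = {
    "count": 2,
    "list": [
        {"report": {"time": {"combined_avg": 12.3, "combined_stdev": 0.4}}},
        {"report": {"time": {"combined_avg": 11.8, "combined_stdev": 0.5}}},
    ],
}

with open("mtsubmit-2t-1e.json", "w") as file:
    json.dump(example_result, file)

# load_and_process_mt_json("mtsubmit-2t-1e.json") then returns
# (12.3 + 11.8) / 2 == 12.05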
@@ -0,0 +1,80 @@
import os
import json
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

x_label = "Copy Type"
y_label = "Time in Microseconds"
var_label = "Configuration"
types = ["intersock-n0ton4", "internode-n0ton1"]
types_nice = ["Inter-Socket Copy", "Inter-Node Copy"]
copy_methods = ["dstcopy", "srccopy", "xcopy"]
copy_methods_nice = ["Engine on DST-Node", "Engine on SRC-Node", "Cross-Copy / Both Engines"]

data = {
    x_label : types_nice,
    copy_methods_nice[0] : [],
    copy_methods_nice[1] : [],
    copy_methods_nice[2] : []
}


# Return the index of value in array, or 0 if it is not present
def index_from_element(value, array):
    for (idx, val) in enumerate(array):
        if val == value: return idx
    return 0


# Load the JSON file for the engine-location benchmark and return the copy time
def load_and_process_copy_json(file_path, method_label):
    with open(file_path, 'r') as file:
        data = json.load(file)

    # Extract the time from the JSON structure
    if method_label == "xcopy":
        # For the xcopy method, add the times of the two entries and divide by 4
        time_entry1 = data["list"][0]["report"]["time"]["combined_avg"]
        time_entry2 = data["list"][1]["report"]["time"]["combined_avg"]
        time_microseconds = (time_entry1 + time_entry2) / 4
    else:
        # For the other methods, use the time of the single entry
        time_microseconds = data["list"][0]["report"]["time"]["combined_avg"]

    return time_microseconds


# Collect the times for one copy method into the data dictionary
def plot_copy_graph(file_paths, method_label):
    times = []

    for file_path in file_paths:
        # Load and process the JSON file for this copy type
        time_microseconds = load_and_process_copy_json(file_path, method_label)
        times.append(time_microseconds)

    method_index = index_from_element(method_label, copy_methods)
    method_nice = copy_methods_nice[method_index]

    data[method_nice] = times


# Main function: iterate over the result files and create the plot
def main():
    folder_path = "benchmark-results/cross-copy-bench/"  # Replace with the actual path to your folder

    for method_label in copy_methods:
        copy_file_paths = [os.path.join(folder_path, f"{method_label}-{type_label}-1mib-4e.json") for type_label in types]
        plot_copy_graph(copy_file_paths, method_label)

    df = pd.DataFrame(data)
    dfm = pd.melt(df, id_vars=x_label, var_name=var_label, value_name=y_label)

    sns.catplot(x=x_label, y=y_label, hue=var_label, data=dfm, kind='bar', height=5, aspect=1, palette="viridis")
    plt.savefig(os.path.join(folder_path, "plot-perf-enginelocation.png"))
    plt.show()

if __name__ == "__main__":
    main()
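
The reshaping step shared by all three plotters converts the wide data dictionary into the long form that sns.catplot expects, one row per (copy type, configuration) pair. A small sketch with made-up numbers and two of the configurations from this script:

import pandas as pd

wide = pd.DataFrame({
    "Copy Type": ["Inter-Socket Copy", "Inter-Node Copy"],
    "Engine on DST-Node": [10.0, 8.0],
    "Engine on SRC-Node": [11.0, 9.0],
})

long = pd.melt(wide, id_vars="Copy Type",
               var_name="Configuration", value_name="Time in Microseconds")

# long now holds four rows, roughly:
#   Copy Type            Configuration        Time in Microseconds
#   Inter-Socket Copy    Engine on DST-Node   10.0
#   Inter-Node Copy      Engine on DST-Node    8.0
#   Inter-Socket Copy    Engine on SRC-Node   11.0
#   Inter-Node Copy      Engine on SRC-Node    9.0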
@@ -0,0 +1,105 @@
import os
import json
import pandas as pd
from typing import List
import seaborn as sns
import matplotlib.pyplot as plt

x_label = "Size of Submitted Task"
y_label = "Time to Copy 1 KiB in Microseconds"
var_label = "Submission Type"
sizes = ["1kib", "4kib", "1mib", "1gib"]
sizes_nice = ["1 KiB", "4 KiB", "1 MiB", "1 GiB"]
types = ["bs10", "bs50", "ms10", "ms50", "ssaw"]
types_nice = ["Batch, Size 10", "Batch, Size 50", "Multi-Submit, Count 10", "Multi-Submit, Count 50", "Single Submit"]

data = {
    x_label : sizes_nice,
    types_nice[0] : [],
    types_nice[1] : [],
    types_nice[2] : [],
    types_nice[3] : [],
    types_nice[4] : []
}

# Standard deviations, keyed by submission type and then by size
stdev = {}


# Return the index of value in array, or 0 if it is not present
def index_from_element(value, array):
    for (idx, val) in enumerate(array):
        if val == value: return idx
    return 0


# Load the JSON file for the submit benchmark, record the standard deviation
# for size s and type t, and return the average time
def load_and_process_submit_json(file_path, s, t):
    with open(file_path, 'r') as file:
        data = json.load(file)
        time_microseconds = data["list"][0]["report"]["time"]["combined_avg"]
        if t not in stdev: stdev[t] = dict()
        stdev[t][s] = data["list"][0]["report"]["time"]["combined_stdev"]
        return time_microseconds


# Map a value to an interval of one standard deviation (currently unused)
def stdev_functor(values):
    v = values[0]
    sd = stdev[v]
    return (v - sd, v + sd)


# Collect the times for one submission type into the data dictionary
def plot_submit_graph(file_paths, type_label):
    times = []

    type_index = index_from_element(type_label, types)
    type_nice = types_nice[type_index]

    idx = 0
    for file_path in file_paths:
        time_microseconds = load_and_process_submit_json(file_path, sizes_nice[idx], type_nice)
        times.append(time_microseconds)
        idx = idx + 1

    # Adjust the time measurements based on the type,
    # which can contain multiple submissions
    if type_label in {"bs10", "ms10"}:
        times = [time / 10 for time in times]
    elif type_label in {"ms50", "bs50"}:
        times = [time / 50 for time in times]

    # Normalize to the time per 1 KiB of copied data
    times[0] = times[0] / 1
    times[1] = times[1] / 4
    times[2] = times[2] / 1024
    times[3] = times[3] / (1024 * 1024)

    data[type_nice] = times


# Main function: iterate over the result files and create the plot
def main():
    folder_path = "benchmark-results/submit-bench/"  # Replace with the actual path to your folder

    for type_label in types:
        file_paths = [os.path.join(folder_path, f"submit-{type_label}-{size}-1e.json") for size in sizes]
        plot_submit_graph(file_paths, type_label)

    df = pd.DataFrame(data)
    dfm = pd.melt(df, id_vars=x_label, var_name=var_label, value_name=y_label)

    error_values: List[float] = []
    for index, row in dfm.iterrows():
        s = dfm[x_label][index]
        t = dfm[var_label][index]
        error_values.append(stdev[t][s])

    dfm["Stdev"] = error_values

    print(dfm)

    sns.catplot(x=x_label, y=y_label, hue=var_label, data=dfm, kind='bar', height=5, aspect=1, palette="viridis", errorbar=("ci", 100))
    plt.title("Performance of Submission Methods - Copy Operation tested Intra-Node on DDR")
    plt.savefig(os.path.join(folder_path, "plot-perf-submitmethod.png"))
    plt.show()

if __name__ == "__main__":
    main()
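
To make the submission types comparable across task sizes, plot_submit_graph first divides the measured time by the number of submissions a run contains (10 or 50 for the batch and multi-submit types) and then by the task size in KiB, which yields the "Time to Copy 1 KiB" shown on the y-axis. A worked example with a made-up measurement, assuming each submission copies the full listed size:

measured = 20480.0               # hypothetical combined_avg for a bs10 run at 1 MiB, in microseconds
per_submission = measured / 10   # batch size 10      -> 2048.0 us per submission
per_kib = per_submission / 1024  # 1 MiB == 1024 KiB  -> 2.0 us per KiB, the value that gets plotted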
Added plot previews: 733 × 500 px (25 KiB), 692 × 500 px (21 KiB), 710 × 500 px (34 KiB)