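# Plots the throughput of the multi-threaded submit benchmark: for every engine
# configuration the measured copy times are averaged per thread count,
# converted to GiB/s for the 1 MiB copy size and rendered as a grouped bar
# chart. The result path below is relative, so run the script from the
# directory that contains benchmark-results/.
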
import os
import json

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Axis and legend labels plus the benchmark configurations to plot
x_label = "Thread Count"
y_label = "Throughput in GiB/s"
var_label = "Engines per Group"
thread_counts = ["1t", "2t", "4t", "8t", "12t"]
thread_counts_nice = ["1 Thread", "2 Threads", "4 Threads", "8 Threads", "12 Threads"]
engine_counts = ["1e", "4e"]
engine_counts_nice = ["1 Engine per Group", "4 Engines per Group"]
title = "Performance of Multi-Threaded Submit - Copy Operation Intra-Node on DDR with Size 1 MiB"

# One column of thread-count labels plus one throughput column per engine
# configuration; the throughput columns are filled in by plot_mt_graph
data = {
    x_label: thread_counts_nice,
    engine_counts_nice[0]: [],
    engine_counts_nice[1]: [],
}


# Convert a copy size in bytes and a duration in microseconds to GiB/s
def calc_throughput(size_bytes, time_microseconds):
    time_seconds = time_microseconds * 1e-6
    size_gib = size_bytes / (1024 ** 3)
    throughput_gibs = size_gib / time_seconds
    return throughput_gibs


# Return the index of the first occurrence of value in array, falling back to 0
def index_from_element(value, array):
    for idx, val in enumerate(array):
        if val == value:
            return idx
    return 0


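# The loader below assumes result files shaped roughly like the following;
# only the keys that are actually accessed are shown and the values are
# placeholders:
#
#   {
#     "count": 2,
#     "list": [
#       {"report": {"time": {"combined_avg": 123.4}}},
#       {"report": {"time": {"combined_avg": 125.0}}}
#     ]
#   }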
# Load a JSON result file of the multi-threaded benchmark and return the
# average combined time over all of its entries
def load_and_process_mt_json(file_path):
    with open(file_path, 'r') as file:
        result = json.load(file)

    # Extract the number of entries from the JSON structure
    count = result["count"]

    # Extract the combined average time of entries 0 to count - 1
    times = [result["list"][i]["report"]["time"]["combined_avg"] for i in range(count)]

    # Average the times over all entries
    average_time = sum(times) / count

    return average_time


# Gather the throughput column for one engine configuration and store it in the
# global data dict; the actual plotting happens in main
def plot_mt_graph(file_paths, engine_label):
    times = []

    for file_path in file_paths:
        # Load and average the measured times for this thread count
        time_microseconds = load_and_process_mt_json(file_path)
        times.append(time_microseconds)

    engine_index = index_from_element(engine_label, engine_counts)
    engine_nice = engine_counts_nice[engine_index]

    # 1 MiB copy size, matching the benchmark configuration named in the title
    throughput = [calc_throughput(1024 * 1024, t) for t in times]

    data[engine_nice] = throughput


# Main function: iterate over the result files and create the plot for the benchmark
def main():
    folder_path = "benchmark-results/mtsubmit-bench/"  # Replace with the actual path to your folder

    for engine_label in engine_counts:
        mt_file_paths = [os.path.join(folder_path, f"mtsubmit-{thread_count}-{engine_label}.json") for thread_count in thread_counts]
        plot_mt_graph(mt_file_paths, engine_label)

    df = pd.DataFrame(data)
    dfm = pd.melt(df, id_vars=x_label, var_name=var_label, value_name=y_label)

    sns.catplot(x=x_label, y=y_label, hue=var_label, data=dfm, kind='bar', height=5, aspect=1, palette="viridis")
    plt.title(title)
    plt.savefig(os.path.join(folder_path, "plot-cost-mtsubmit.png"), bbox_inches='tight')
    plt.show()


if __name__ == "__main__":
    main()