Constantin Fürst
11 months ago
4 changed files with 113 additions and 3243 deletions
- 722 benchmarks/benchmark-descriptors/peak-perf-brute-cpu/copy-n0ton11-1gib-allnodes-cpu.json
- 1262 benchmarks/benchmark-descriptors/peak-perf-brute-cpu/copy-n0ton12-1gib-allnodes-cpu.json
- 1262 benchmarks/benchmark-descriptors/peak-perf-brute-cpu/copy-n0ton15-1gib-allnodes-cpu.json
+ 110 benchmarks/benchmark-plotters/plot-perf-peakthroughput-cpu-bar.py
benchmarks/benchmark-descriptors/peak-perf-brute-cpu/copy-n0ton12-1gib-allnodes-cpu.json: file diff suppressed because it is too large
benchmarks/benchmark-descriptors/peak-perf-brute-cpu/copy-n0ton15-1gib-allnodes-cpu.json: file diff suppressed because it is too large
benchmarks/benchmark-plotters/plot-perf-peakthroughput-cpu-bar.py
@@ -0,0 +1,110 @@
import os
import json
import pandas as pd
from itertools import chain
import seaborn as sns
import matplotlib.pyplot as plt

from common import calc_throughput

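# calc_throughput comes from the repository's common module and is not part
# of this diff; given that sizes are in bytes and timings in nanoseconds per
# element (see load_time_measurements below), it presumably computes
# something like the following sketch (an assumption, not the actual code):
#
#   def calc_throughput(size_bytes, time_ns):
#       return (size_bytes / (1024 ** 3)) / (time_ns / 1e9)  # GiB/s
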
result_path = "benchmark-results/"
output_path = "benchmark-plots/"

runid = "Run ID"
x_label = "Destination Node"
y_label = "Throughput in GiB/s"

title_allnodes = \
"""Copy Throughput in GiB/s tested for 1GiB Elements\n
Using all 8 DSA Chiplets available on the System"""
title_smartnodes = \
"""Copy Throughput in GiB/s tested for 1GiB Elements\n
Using Cross-Copy for Intersocket and all 4 Chiplets of Socket for Intrasocket"""
title_difference = \
"""Gain in Copy Throughput in GiB/s of All-DSA vs. Smart Assignment"""

description_smartnodes = \
"""Copy Throughput in GiB/s tested for 1GiB Elements\n
Nodes of {8...15} are HBM accessors for their counterparts (minus 8)\n
Using all 4 DSA Chiplets of a Socket for Intra-Socket Operation\n
And using only the Source and Destination Nodes DSA for Inter-Socket"""
description_allnodes = \
"""Copy Throughput in GiB/s tested for 1GiB Elements\n
Nodes of {8...15} are HBM accessors for their counterparts (minus 8)\n
Using all 8 DSA Chiplets available on the System"""

index = [runid, x_label, y_label]
data = []


# loads the measurements from a given file and processes them
# so that they are normalized, meaning that the timings returned
# are nanoseconds per element transferred
def load_time_measurements(file_path):
    with open(file_path, 'r') as file:
        content = json.load(file)

    count = content["count"]
    batching = content["list"][0]["task"]["batching"]["batch_size"]
    batch_size = batching if batching > 0 else 1
    iterations = content["list"][0]["task"]["iterations"]

    return {
        "size": content["list"][0]["task"]["size"],
        "total": sum(content["list"][i]["report"]["time"]["total"] / (iterations * batch_size * count * count) for i in range(count)),
        "combined": [x / (count * batch_size) for x in chain(*[content["list"][i]["report"]["time"]["combined"] for i in range(count)])],
        "submission": [x / (count * batch_size) for x in chain(*[content["list"][i]["report"]["time"]["submission"] for i in range(count)])],
        "completion": [x / (count * batch_size) for x in chain(*[content["list"][i]["report"]["time"]["completion"] for i in range(count)])]
    }
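# the accessors above imply the following result-file layout
# (reconstructed from this function, values are illustrative only):
#
#   {
#       "count": 2,
#       "list": [
#           {
#               "task": { "size": 1073741824, "iterations": 1,
#                         "batching": { "batch_size": 0 } },
#               "report": { "time": { "total": ..., "combined": [...],
#                                     "submission": [...], "completion": [...] } }
#           },
#           ...
#       ]
#   }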


# processes a single file and appends the desired timings
# to the global data-array, handles multiple runs with a runid
# and silently skips files that are not found, as some
# configurations may not be benchmarked
def process_file_to_dataset(file_path, src_node, dst_node):
    try:
        file_data = load_time_measurements(file_path)
        time = file_data["combined"]
        size = file_data["size"]

        for run_idx, t in enumerate(time):
            tp = calc_throughput(size, t)
            data.append({runid: run_idx, x_label: dst_node, y_label: tp})
    except FileNotFoundError:
        return
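# each appended entry becomes one observation in the dataframe, e.g.
#   { "Run ID": 0, "Destination Node": 8, "Throughput in GiB/s": 42.0 }
# (values illustrative)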


def plot_bar(table, title, node_config):
    plt.figure(figsize=(8, 6))

    sns.barplot(x=x_label, y=y_label, data=table, palette="rocket", errorbar=None)

    plt.title(title)
    plt.ylim(0, 100)

    plt.savefig(os.path.join(output_path, f"plot-perf-{node_config}-cpu-throughput-selectbarplot.pdf"), bbox_inches='tight')
    plt.show()


# loops over all possible configuration combinations and calls
# process_file_to_dataset for them in order to build a dataframe
# which is then displayed and saved
def main(node_config, title):
    src_node = 0
    for dst_node in [8, 11, 12, 15]:
        size = "512mib" if node_config == "allnodes" and src_node == dst_node and src_node >= 8 else "1gib"
        file = os.path.join(result_path, f"copy-n{src_node}ton{dst_node}-{size}-{node_config}-cpu-1e.json")
        process_file_to_dataset(file, src_node, dst_node)

    df = pd.DataFrame(data)

    data.clear()
    df.set_index(index, inplace=True)

    plot_bar(df, title, node_config)

    return df


if __name__ == "__main__":
    dall = main("allnodes", title_allnodes)
    dsmart = main("smart", title_smartnodes)
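title_difference is defined but never used, and main() returns its dataframe for both configurations, which hints at a follow-up comparison plot. A minimal sketch of how that gain could be computed from dall and dsmart in the same module context; plot_difference and the output filename are hypothetical, not part of this commit:

# hypothetical follow-up, not part of this commit
def plot_difference(dall, dsmart):
    # undo the set_index() from main() so the columns are accessible again,
    # then average the runs per destination node for both configurations
    mean_all = dall.reset_index().groupby(x_label)[y_label].mean()
    mean_smart = dsmart.reset_index().groupby(x_label)[y_label].mean()
    gain = (mean_all - mean_smart).reset_index()

    plt.figure(figsize=(8, 6))
    sns.barplot(x=x_label, y=y_label, data=gain, palette="rocket", errorbar=None)
    plt.title(title_difference)
    plt.savefig(os.path.join(output_path, "plot-perf-difference-cpu-throughput-selectbarplot.pdf"), bbox_inches='tight')
    plt.show()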