This repository contains my bachelor's thesis, the associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator

import os
import json
import pandas as pd
from itertools import chain
import seaborn as sns
import matplotlib.pyplot as plt
from common import calc_throughput
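# Note: calc_throughput is provided by common.py, which is not part of this
# listing. A minimal sketch of the assumed behaviour (hypothetical, for
# reference only): given a transfer size in bytes and a normalized timing in
# nanoseconds per element, it would return the throughput in GiB/s, roughly
#
#   throughput_gib_s = (size_bytes / 1024**3) / (time_ns / 1e9)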
runid = "Run ID"
x_label = "Destination Node"
y_label = "Source Node"
v_label = "Throughput"
title_allnodes = \
"""Copy Throughput in GiB/s tested for 1GiB Elements\n
Using all 8 DSA Chiplets available on the System"""
title_smartnodes = \
"""Copy Throughput in GiB/s tested for 1GiB Elements\n
Using Cross-Copy for Intersocket and all 4 Chiplets of Socket for Intrasocket"""
description_smartnodes = \
"""Copy Throughput in GiB/s tested for 1GiB Elements\n
Nodes of {8...15} are HBM accessors for their counterparts (minus 8)\n
Using all 4 DSA Chiplets of a Socket for Intra-Socket Operation\n
And using only the Source and Destination Nodes DSA for Inter-Socket"""
description_allnodes = \
"""Copy Throughput in GiB/s tested for 1GiB Elements\n
Nodes of {8...15} are HBM accessors for their counterparts (minus 8)\n
Using all 8 DSA Chiplets available on the System"""
index = [ runid, x_label, y_label]
data = []

# loads the measurements from a given file and processes them
# so that they are normalized, meaning that the timings returned
# are nanoseconds per element transferred
def load_time_mesurements(file_path):
    with open(file_path, 'r') as file:
        data = json.load(file)
        count = data["count"]
        batch_size = data["list"][0]["task"]["batching"]["batch_size"] if data["list"][0]["task"]["batching"]["batch_size"] > 0 else 1
        iterations = data["list"][0]["task"]["iterations"]

        return {
            "size": data["list"][0]["task"]["size"],
            "total": sum([x / (iterations * batch_size * count * count) for x in list(chain([data["list"][i]["report"]["time"]["total"] for i in range(count)]))]),
            "combined": [x / (count * batch_size) for x in list(chain(*[data["list"][i]["report"]["time"]["combined"] for i in range(count)]))],
            "submission": [x / (count * batch_size) for x in list(chain(*[data["list"][i]["report"]["time"]["submission"] for i in range(count)]))],
            "completion": [x / (count * batch_size) for x in list(chain(*[data["list"][i]["report"]["time"]["completion"] for i in range(count)]))]
        }
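
# The benchmark JSON consumed above is expected to look roughly as follows.
# Field names are taken from the accesses in load_time_mesurements; the
# values are purely illustrative:
#
#   {
#       "count": 2,
#       "list": [
#           {
#               "task": {
#                   "size": 1073741824,
#                   "iterations": 1000,
#                   "batching": { "batch_size": 0 }
#               },
#               "report": {
#                   "time": {
#                       "total": 123456789,
#                       "combined": [...],
#                       "submission": [...],
#                       "completion": [...]
#                   }
#               }
#           },
#           ...
#       ]
#   }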

# processes a single file and appends the desired timings
# to the global data array, handles multiple runs with a run id,
# and silently skips files that are not found, as some
# configurations may not have been benchmarked
def process_file_to_dataset(file_path, src_node, dst_node):
    try:
        file_data = load_time_mesurements(file_path)
        time = [file_data["total"]]
        run_idx = 0
        for t in time:
            data.append({runid: run_idx, x_label: dst_node, y_label: src_node, v_label: calc_throughput(file_data["size"], t)})
            run_idx = run_idx + 1
    except FileNotFoundError:
        return

# loops over all source/destination node combinations and calls
# process_file_to_dataset for them in order to build a dataframe,
# which is then plotted as a heatmap, saved and displayed
def main(node_config, title):
    folder_path = "benchmark-results/"

    for src_node in range(16):
        for dst_node in range(16):
            size = "512mib" if src_node == dst_node and src_node >= 8 else "1gib"
            file = os.path.join(folder_path, f"copy-n{src_node}ton{dst_node}-{size}-{node_config}-1e.json")
            process_file_to_dataset(file, src_node, dst_node)

    df = pd.DataFrame(data)
    data.clear()
    df.set_index(index, inplace=True)

    data_pivot = df.pivot_table(index=y_label, columns=x_label, values=v_label)

    plt.figure(figsize=(8, 6))
    sns.heatmap(data_pivot, annot=True, cmap="rocket_r", fmt=".0f")
    plt.title(title)
    plt.savefig(os.path.join(folder_path, f"plot-perf-{node_config}-throughput.png"), bbox_inches='tight')
    plt.show()

if __name__ == "__main__":
    main("allnodes", title_allnodes)
    main("smart", title_smartnodes)