This repository contains my bachelor's thesis and the associated TeX files, code snippets and maybe more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator

import os
import json

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
runid = "Run ID"
x_label = "Copy Type"
y_label = "Throughput in GiB/s"
var_label = "Configuration"

types = ["intersock-n0ton4-1mib", "internode-n0ton1-1mib", "intersock-n0ton4-1gib", "internode-n0ton1-1gib"]
types_nice = ["Inter-Socket 1MiB", "Inter-Node 1MiB", "Inter-Socket 1GiB", "Inter-Node 1GiB"]

copy_methods = ["dstcopy", "srccopy", "xcopy", "srcoutsidercopy", "dstoutsidercopy", "sockoutsidercopy", "nodeoutsidercopy"]
copy_methods_nice = ["Engine on DST-Node", "Engine on SRC-Node", "Cross-Copy / Both Engines", "Engine on SRC-Socket, not SRC-Node", "Engine on DST-Socket, not DST-Node", "Engine on different Socket", "Engine on same Socket"]

title = "Performance of Engine Location - Copy Operation on DDR with 1 Engine per WQ"

index = [runid, x_label, var_label]
data = []

# Converts a size in bytes and a duration in nanoseconds to throughput in GiB/s.
def calc_throughput(size_bytes, time_ns):
    time_seconds = time_ns * 1e-9
    size_gib = size_bytes / (1024 ** 3)
    throughput_gibs = size_gib / time_seconds
    return throughput_gibs


# Returns the index of value in array, falling back to 0 if it is not present.
def index_from_element(value, array):
    for (idx, val) in enumerate(array):
        if val == value:
            return idx
    return 0

# Loads the timing data from one benchmark result file and normalizes the
# total time to a per-iteration value.
def load_time_measurements(file_path, method_label):
    with open(file_path, 'r') as file:
        data = json.load(file)

    iterations = data["list"][0]["task"]["iterations"]

    if method_label == "xcopy":
        # xcopy runs on two engines that each copy 1/2 of the entire
        # specified size, therefore the maximum time between these two
        # is going to be the total time for the copy
        time0 = data["list"][0]["report"]["time"]
        time1 = data["list"][1]["report"]["time"]
        return {
            "total": max(time0["total"], time1["total"]) / iterations,
            "combined": [max(x, y) for x, y in zip(time0["combined"], time1["combined"])],
            "submission": [max(x, y) for x, y in zip(time0["submission"], time1["submission"])],
            "completion": [max(x, y) for x, y in zip(time0["completion"], time1["completion"])]
        }
    else:
        return {
            "total": data["list"][0]["report"]["time"]["total"] / iterations,
            "combined": data["list"][0]["report"]["time"]["combined"],
            "submission": data["list"][0]["report"]["time"]["submission"],
            "completion": data["list"][0]["report"]["time"]["completion"]
        }

# Reads one result file and appends a throughput entry per measurement
# to the global data list; missing result files are skipped silently.
def create_copy_dataset(file_path, method_label, type_label):
    method_index = index_from_element(method_label, copy_methods)
    method_nice = copy_methods_nice[method_index]
    type_index = index_from_element(type_label, types)
    type_nice = types_nice[type_index]

    if type_label in ["internode-n0ton1-1gib", "intersock-n0ton4-1gib"]:
        data_size = 1024 * 1024 * 1024
    elif type_label in ["internode-n0ton1-1mib", "intersock-n0ton4-1mib"]:
        data_size = 1024 * 1024
    else:
        data_size = 0

    try:
        run_idx = 0
        time = [load_time_measurements(file_path, method_label)["total"]]
        for t in time:
            data.append({runid: run_idx, x_label: type_nice, var_label: method_nice, y_label: calc_throughput(data_size, t)})
            run_idx = run_idx + 1
    except FileNotFoundError:
        return

def main():
    folder_path = "benchmark-results/"

    for method_label in copy_methods:
        for type_label in types:
            file = os.path.join(folder_path, f"{method_label}-{type_label}-1e.json")
            create_copy_dataset(file, method_label, type_label)

    df = pd.DataFrame(data)
    df.set_index(index, inplace=True)
    df = df.sort_values(y_label)

    sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")
    plt.title(title)
    plt.savefig(os.path.join(folder_path, "plot-perf-enginelocation.png"), bbox_inches='tight')
    plt.show()


if __name__ == "__main__":
    main()
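
For reference, below is a minimal sketch of the benchmark-result layout that load_time_measurements() expects, inferred purely from the field accesses in the script above. The file name, iteration count and timing numbers are made-up placeholders rather than real measurements; only the key structure matters.

import json
import os

# Placeholder result file matching the f"{method_label}-{type_label}-1e.json"
# naming scheme used in main(); all numbers below are invented for illustration.
example_result = {
    "list": [
        {
            "task": {"iterations": 3},
            "report": {
                "time": {
                    "total": 3.0e8,                     # ns, summed over all iterations
                    "combined": [1.0e8, 1.0e8, 1.0e8],  # one entry per iteration
                    "submission": [1.0e4, 1.1e4, 0.9e4],
                    "completion": [1.0e8, 1.0e8, 1.0e8]
                }
            }
        }
        # xcopy results carry a second list entry for the second engine
    ]
}

os.makedirs("benchmark-results", exist_ok=True)
with open("benchmark-results/dstcopy-internode-n0ton1-1gib-1e.json", "w") as f:
    json.dump(example_result, f, indent=2)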