This repository contains my bachelor's thesis, the associated TeX files, code snippets, and possibly more.
Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator
import os
import json

from typing import List
from numpy import float64

# calculates throughput in GiB/s from the measured
# transfer duration (in nanoseconds) for a given element
# with the size of this element given in bytes
def calc_throughput(size_bytes, time_ns):
    time_seconds = time_ns * 1e-9
    size_gib = size_bytes / (1024 ** 3)
    throughput_gibs = size_gib / time_seconds
    return throughput_gibs

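# usage sketch (hypothetical values, not from any measurement):
# a 1 GiB transfer that took 250 ms (250 * 1e6 ns) comes out at 4 GiB/s
#   calc_throughput(1024 ** 3, 250 * 1e6)  # -> 4.0
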
# reverse array search: return the index of value in array;
# falls back to index 0 if the value is not present
def index_from_element(value, array):
    for (idx, val) in enumerate(array):
        if val == value:
            return idx
    return 0

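# usage sketch (hypothetical values): with the size list [1, 4, 16],
# a value of 16 maps back to index 2, while an unknown value yields 0
#   index_from_element(16, [1, 4, 16])  # -> 2
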
# loads the measurements from a given file and divides each timing
# by the internal repetition count of the tasks
def load_time_mesurements(file_path):
    with open(file_path, 'r') as file:
        data = json.load(file)
        count = data["count"]
        runcount_divisor = data["list"][0]["task"]["reps"]

        # if there is more than one thread, the internal repetition
        # count should be the same for all tasks. if you decide it
        # shouldn't be, remove the check below
        if count > 1:
            for i in range(count):
                if runcount_divisor != data["list"][i]["task"]["reps"]:
                    print("Runcount mismatch between tasks. Check the comment above, aborting for now.")
                    os.abort()

        return [x / runcount_divisor for x in data["timings"]]

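# input layout assumed by load_time_mesurements, inferred from the accesses
# above (the field values here are purely hypothetical):
#   { "count": 2,
#     "list": [ { "task": { "reps": 100 } }, { "task": { "reps": 100 } } ],
#     "timings": [ 1.5e6, 1.6e6 ] }
# this example would yield the per-repetition timings [15000.0, 16000.0]
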
# reads only the number of tasks contained in a given result file
def get_task_count(file_path):
    with open(file_path, 'r') as file:
        return json.load(file)["count"]

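# hedged end-to-end sketch combining the helpers above: turn one result file
# into a list of throughput values. the file name and the fixed transfer size
# of 1 GiB are illustrative assumptions, not part of the original scripts.
def example_throughput_from_file(file_path="benchmark-results.json", size_bytes=1024 ** 3):
    per_rep_timings_ns = load_time_mesurements(file_path)
    return [calc_throughput(size_bytes, t) for t in per_rep_timings_ns]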