This repository contains my bachelor's thesis and the associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

46 lines
1.4 KiB

  1. import os
  2. import json
  3. from typing import List
  4. from numpy import float64
  5. # calculates throughput in gib/s from the meassured
  6. # transfer duration (in nanoseconds) for a given element
  7. # with the size of this given in bytes
  8. def calc_throughput(size_bytes,time_ns):
  9. time_seconds = time_ns * 1e-9
  10. size_gib = size_bytes / (1024 ** 3)
  11. throughput_gibs = size_gib / time_seconds
  12. return throughput_gibs
  13. # reverse array search: return index of value in array
  14. def index_from_element(value,array):
  15. for (idx,val) in enumerate(array):
  16. if val == value: return idx
  17. return 0
  18. # loads the measurements from a given file
  19. def load_time_mesurements(file_path):
  20. with open(file_path, 'r') as file:
  21. data = json.load(file)
  22. count = data["count"]
  23. runcount_divisor = data["list"][0]["task"]["reps"]
  24. # if theres more than one thread, the internal repetition
  25. # count should be the same. if you decide it shouldnt
  26. # remove the check below
  27. if count > 1:
  28. for i in range(count):
  29. if runcount_divisor != data["list"][i]["task"]["reps"]:
  30. print("Runcount missmatch between tasks. Check the commend above, aborting for now.")
  31. os.abort()
  32. return [ x / runcount_divisor for x in data["timings"]]
  33. def get_task_count(file_path):
  34. with open(file_path, 'r') as file:
  35. return json.load(file)["count"]