This repository contains my bachelor's thesis and its associated TeX files, code snippets, and possibly more. Topic: Data Movement in Heterogeneous Memories with Intel Data Streaming Accelerator
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

95 lines
3.5 KiB

  1. import os
  2. import json
  3. import pandas as pd
  4. from itertools import chain
  5. import seaborn as sns
  6. import matplotlib.pyplot as plt
  7. runid = "Run ID"
  8. x_label = "Thread Count"
  9. y_label = "Throughput in GiB/s LogScale"
  10. var_label = "Thread Counts"
  11. thread_counts = ["1t", "2t", "4t", "8t", "12t"]
  12. thread_counts_nice = ["1 Thread", "2 Threads", "4 Threads", "8 Threads", "12 Threads"]
  13. engine_counts = ["1mib-1e", "1mib-4e", "1gib-1e", "1gib-4e"]
  14. engine_counts_nice = ["1 E/WQ and Tasksize 1 MiB", "4 E/WQ and Tasksize 1 MiB", "1 E/WQ and Tasksize 1 GiB", "4 E/WQ and Tasksize 1 GiB"]
  15. title = "Per-Thread Throughput - 120 Copy Operations split on Threads Intra-Node on DDR with Size 1 MiB"
  16. index = [runid, x_label, var_label]
  17. data = []
  18. def calc_throughput(size_bytes,time_nanosec):
  19. time_seconds = time_nanosec * 1e-9
  20. size_gib = size_bytes / (1024 ** 3)
  21. throughput_gibs = size_gib / time_seconds
  22. return throughput_gibs
  23. def index_from_element(value,array):
  24. for (idx,val) in enumerate(array):
  25. if val == value: return idx
  26. return 0
  27. def load_and_process_copy_json(file_path):
  28. with open(file_path, 'r') as file:
  29. data = json.load(file)
  30. count = data["count"]
  31. iterations = data["list"][0]["task"]["iterations"]
  32. return {
  33. "total" : sum([x / (iterations * (120 / count)) for x in list(chain(*[data["list"][i]["report"]["time"]["total"] for i in range(count)]))]),
  34. "combined" : [x / (120 / count) for x in list(chain(*[data["list"][i]["report"]["time"]["combined"] for i in range(count)]))],
  35. "submission" : [x / (120 / count) for x in list(chain(*[data["list"][i]["report"]["time"]["submission"] for i in range(count)]))],
  36. "completion" : [x / (120 / count) for x in list(chain(*[data["list"][i]["report"]["time"]["completion"] for i in range(count)]))]
  37. }
  38. # Function to plot the graph for the new benchmark
  39. def create_mtsubmit_dataset(file_paths, engine_label):
  40. times = []
  41. engine_index = index_from_element(engine_label,engine_counts)
  42. engine_nice = engine_counts_nice[engine_index]
  43. idx = 0
  44. for file_path in file_paths:
  45. time = load_and_process_copy_json(file_path)
  46. times.append(time["total"])
  47. idx = idx + 1
  48. throughput = []
  49. if engine_label in ["1gib-1e", "1gib-4e"]:
  50. throughput = [[calc_throughput(1024*1024*1024,time) for time in t] for t in times]
  51. else:
  52. throughput = [[calc_throughput(1024*1024,time) for time in t] for t in times]
  53. idx = 0
  54. for run_set in throughput:
  55. run_idx = 0
  56. for run in run_set:
  57. data.append({ runid : run_idx, x_label: thread_counts_nice[idx], var_label : engine_nice, y_label : throughput[idx][run_idx]})
  58. run_idx = run_idx + 1
  59. idx = idx + 1
  60. # Main function to iterate over files and create plots for the new benchmark
  61. def main():
  62. folder_path = "benchmark-results/" # Replace with the actual path to your folder
  63. for engine_label in engine_counts:
  64. mt_file_paths = [os.path.join(folder_path, f"mtsubmit-{thread_count}-{engine_label}.json") for thread_count in thread_counts]
  65. create_mtsubmit_dataset(mt_file_paths, engine_label)
  66. df = pd.DataFrame(data)
  67. df.set_index(index, inplace=True)
  68. sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="rocket", errorbar="sd")
  69. plt.title(title)
  70. plt.savefig(os.path.join(folder_path, "plot-perf-mtsubmit.png"), bbox_inches='tight')
  71. plt.show()
  72. if __name__ == "__main__":
  73. main()