This repository contains my bachelor's thesis, the associated TeX sources, code snippets, and related material. Topic: Data Movement in Heterogeneous Memories with the Intel Data Streaming Accelerator

import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from common import calc_throughput, index_from_element, load_time_mesurements

runid = "Run ID"
x_label = "Thread Count"
y_label = "Throughput in GiB/s"
var_label = "Transfer Size"

thread_counts = ["1t", "2t", "12t"]
thread_counts_nice = ["1 Thread", "2 Threads", "12 Threads"]
size_labels = ["1mib", "1gib"]
size_labels_nice = ["1 MiB", "1 GiB"]

title = \
"""Total Throughput showing cost of MT Submit\n
Copying 120x split on n Threads Intra-Node on DDR\n
"""

description = \
"""Total Throughput showing cost of MT Submit\n
Running 120 Copy Operations split on n Threads\n
Copying Intra-Node on DDR performed for multiple Configurations\n
"""

index = [runid, x_label, var_label]
data = []


# loads the measurements from a given file and processes them
# so that they are normalized, meaning that the timings returned
# are nanoseconds per element transferred
def get_timing(file_path, thread_count):
    divisor = 0
    if thread_count == "1t": divisor = 1
    elif thread_count == "2t": divisor = 2
    elif thread_count == "12t": divisor = 12
    return [x / divisor for x in load_time_mesurements(file_path)]


# processes a single file and appends the desired timings
# to the global data array, handles multiple runs with a runid
# and skips the file if it is not found, as some
# configurations may not be benchmarked
def process_file_to_dataset(file_path, size_label, thread_count):
    # skip configurations for which no result file exists
    if not os.path.isfile(file_path):
        return

    size_index = index_from_element(size_label, size_labels)
    size_nice = size_labels_nice[size_index]
    threadc_index = index_from_element(thread_count, thread_counts)
    thread_count_nice = thread_counts_nice[threadc_index]

    data_size = 0
    if size_label == "1gib": data_size = 1024*1024*1024
    elif size_label == "1mib": data_size = 1024*1024

    timing = get_timing(file_path, thread_count)
    for run_idx, t in enumerate(timing):
        data.append({runid: run_idx, x_label: thread_count_nice, var_label: size_nice, y_label: calc_throughput(data_size, t)})


# loops over all possible configuration combinations and calls
# process_file_to_dataset for them in order to build a dataframe
# which is then displayed and saved
def main():
    result_path = "benchmark-results/"
    output_path = "benchmark-plots/"

    for size in size_labels:
        for thread_count in thread_counts:
            file = os.path.join(result_path, f"mtsubmit-{thread_count}-{size}.json")
            process_file_to_dataset(file, size, thread_count)

    df = pd.DataFrame(data)
    df.set_index(index, inplace=True)

    plt.figure(figsize=(4, 4))
    plt.ylim(0, 30)
    sns.barplot(x=x_label, y=y_label, hue=var_label, data=df, palette="mako", errorbar="sd")
    plt.savefig(os.path.join(output_path, "plot-mtsubmit.pdf"), bbox_inches='tight')
    plt.show()


if __name__ == "__main__":
    main()
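
The helpers imported from common are not part of this file. Below is a minimal sketch of what they might look like, assuming each benchmark JSON file stores its raw per-run timings in nanoseconds under a "timings" key; the key name, file layout, and unit conversion are illustrative assumptions, not the actual implementation from the repository.

    # Hypothetical sketch of the common module used above; the function names
    # mirror the imports, but the JSON layout ("timings" key) and the
    # nanosecond unit are assumptions made for illustration.
    import json


    def load_time_mesurements(file_path):
        # return the list of raw per-run timings (assumed: nanoseconds)
        with open(file_path) as f:
            return json.load(f)["timings"]


    def index_from_element(element, elements):
        # position of a label within its list of known labels
        return elements.index(element)


    def calc_throughput(size_bytes, time_nanos):
        # bytes moved in time_nanos nanoseconds, expressed in GiB/s
        return (size_bytes / (1024 ** 3)) / (time_nanos * 1e-9)

With helpers of this shape, the script reads its inputs from benchmark-results/ as mtsubmit-<thread_count>-<size>.json (for example mtsubmit-2t-1gib.json) and writes plot-mtsubmit.pdf to benchmark-plots/; note that the output directory must already exist, since matplotlib's savefig does not create it.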