
add class-definition comments and clear some double-newlines

master
Constantin Fürst, 12 months ago
commit 9c06bd4fa9
  1. offloading-cacher/cache-data.hpp (15 changed lines)
  2. offloading-cacher/cache.hpp (10 changed lines)

offloading-cacher/cache-data.hpp (15 changed lines)

@@ -13,13 +13,14 @@
 namespace dsacache {
 class Cache;
-// the cache task structure will be used to submit and
-// control a cache element, while providing source pointer
-// and size in bytes for submission
-//
-// then the submitting thread may wait on the atomic "result"
-// which will be notified by the cache worker upon processing
-// after which the atomic-bool-ptr active will also become valid
+// cache data holds all required information on
+// one cache entry and will both be stored
+// internally by the cache and handed out
+// as copies to the user
+// this class uses its object lifetime and
+// a global reference counter to allow
+// thread-safe copies and resource management
 class CacheData {
 public:
 using dml_handler = dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>>;
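The new CacheData comment describes copies that stay valid through a global reference counter tied to object lifetime. Below is a rough, stand-alone sketch of that pattern only; the SharedEntry name and all of its members are made up for illustration, and the real CacheData additionally carries the DML copy handler and completion state, which the sketch leaves out.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// illustration only: one buffer shared between copies through an atomic
// reference counter, freed when the last copy goes out of scope
class SharedEntry {
public:
    SharedEntry(const uint8_t* src, std::size_t size)
        : size_(size),
          data_(static_cast<uint8_t*>(std::malloc(size))),
          refs_(new std::atomic<std::size_t>(1)) {
        std::memcpy(data_, src, size);
    }

    // copying only bumps the shared counter, no data is duplicated
    SharedEntry(const SharedEntry& other)
        : size_(other.size_), data_(other.data_), refs_(other.refs_) {
        refs_->fetch_add(1, std::memory_order_relaxed);
    }

    SharedEntry& operator=(const SharedEntry&) = delete;

    ~SharedEntry() {
        // the copy that drops the counter to zero releases the resources
        if (refs_->fetch_sub(1, std::memory_order_acq_rel) == 1) {
            std::free(data_);
            delete refs_;
        }
    }

    const uint8_t* data() const { return data_; }
    std::size_t size() const { return size_; }

private:
    std::size_t size_;
    uint8_t* data_;
    std::atomic<std::size_t>* refs_;
};

int main() {
    uint8_t raw[4] = {1, 2, 3, 4};
    SharedEntry a(raw, sizeof(raw));
    SharedEntry b = a;  // both handles reference the same buffer
    return 0;           // freed exactly once, by whichever handle dies last
}

Because copying is just an atomic increment, handing out such objects by value stays cheap and thread-safe, which is the property the comment above claims for CacheData.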

offloading-cacher/cache.hpp (10 changed lines)

@@ -16,8 +16,12 @@
 #include "cache-data.hpp"
 namespace dsacache {
-// singleton which holds the cache workers
-// and is the place where work will be submited
+// cache class will handle access to data through the cache
+// by managing the cache through work submission, it sticks
+// to user-defined caching and copy policies, is thread
+// safe after initialization and returns copies of
+// cache data class to the user
 class Cache {
 public:
 // cache policy is defined as a type here to allow flexible usage of the cacher
@@ -234,7 +238,6 @@ inline dml::handler<dml::mem_copy_operation, std::allocator<uint8_t>> dsacache::
 return dml::submit<dml::automatic>(dml::mem_copy.block_on_fault(), srcv, dstv);
 }
-
 void dsacache::Cache::GetCacheNode(uint8_t* src, const size_t size, int* OUT_DST_NODE, int* OUT_SRC_NODE) const {
 // obtain numa node of current thread to determine where the data is needed
@@ -251,7 +254,6 @@ void dsacache::Cache::GetCacheNode(uint8_t* src, const size_t size, int* OUT_DST
 *OUT_DST_NODE = cache_policy_function_(current_node, *OUT_SRC_NODE, size);
 }
-
 inline void dsacache::Cache::Flush(const int node) {
 std::cout << "[-] Flushing Cache for " << (node == -1 ? "all nodes" : "node " + std::to_string(node)) << std::endl;
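The added Cache comment says the cacher sticks to user-defined caching and copy policies, and GetCacheNode above forwards the caller's node, the data's source node and the size to cache_policy_function_. The following minimal sketch shows what such a user-supplied policy could look like; the CachePolicy alias, size_threshold_policy and the 64 KiB threshold are assumptions for illustration, not the cacher's actual types.

#include <cstddef>
#include <functional>
#include <iostream>

// hypothetical alias mirroring the call shape seen in GetCacheNode above:
// choose a destination node from the caller's node, the data's node and the size
using CachePolicy = std::function<int(int current_node, int src_node, std::size_t size)>;

// example policy: only pull data over to the caller's node when it is large
// enough to amortize the copy, otherwise leave it on its source node
int size_threshold_policy(int current_node, int src_node, std::size_t size) {
    constexpr std::size_t kMinBytes = 64 * 1024;  // arbitrary illustrative threshold
    return size >= kMinBytes ? current_node : src_node;
}

int main() {
    CachePolicy policy = size_threshold_policy;
    // stand-in node ids; real code would query them, for example via libnuma
    const int current_node = 1;
    const int src_node = 0;
    std::cout << "4 KiB buffer -> node " << policy(current_node, src_node, 4096) << '\n';
    std::cout << "1 MiB buffer -> node " << policy(current_node, src_node, 1 << 20) << '\n';
    return 0;
}

A policy of this shape keeps small transfers where they are and only moves larger buffers closer to the consuming thread, which is the kind of decision the destination-node computation above delegates to the user.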
