9 case MemoryLocation::Heap:
11 case MemoryLocation::ManagedSharedAcceleratorDeviceMemory:
12 return "managed-shared-accelerator-device";
17#if defined(GPUOffloadingOff)
22 void* data = std::aligned_alloc(
47 int padding = size % alignment == 0 ? 0 : alignment - (size % alignment);
48 return size + padding;
51#if defined(GPUOffloadingOMP)
52#pragma omp declare target
55 #if defined(GPUOffloadingOff)
56 assertion2( byteCount%
sizeof(
double)==0, ch, byteCount );
58 #if defined(OpenMPManuallyOffloadMemset)
59 for (
size_t i=0; i<byteCount/
sizeof(
double); i++){
64 return static_cast<double*
>(
66 static_cast<void*
>(dest),
74#if defined(GPUOffloadingOMP)
75#pragma omp end declare target
#define assertion2(expr, param0, param1)
std::string toString(Filter filter)
void * allocateRawData(std::size_t size, MemoryLocation location, int device)
double * memset(double *dest, double ch, size_t byteCount)
Alternative GPU-ready version of memset.
void freeMemory(void *data, MemoryLocation location, int device=accelerator::Device::HostDevice)
Free memory for the given memory location and device.
void gpuAbort()
Delegates to std::abort() if no GPU offloading is active.
std::size_t padSizeToAlignment(std::size_t size, std::size_t alignment)
std::string toString(MemoryLocation value)