diff --git a/openmp/libomptarget/plugins/amdgpu/src/rtl.cpp b/openmp/libomptarget/plugins/amdgpu/src/rtl.cpp
--- a/openmp/libomptarget/plugins/amdgpu/src/rtl.cpp
+++ b/openmp/libomptarget/plugins/amdgpu/src/rtl.cpp
@@ -2620,13 +2620,26 @@
   void *Ptr = NULL;
   assert(DeviceId < DeviceInfo().NumberOfDevices && "Device ID too large");
 
-  if (Kind != TARGET_ALLOC_DEFAULT) {
+  hsa_amd_memory_pool_t MemoryPool;
+  switch (Kind) {
+  case TARGET_ALLOC_DEFAULT:
+    // GPU memory
+    MemoryPool = DeviceInfo().getDeviceMemoryPool(DeviceId);
+    break;
+  case TARGET_ALLOC_HOST:
+    // non-migratable memory accessible by host and device(s)
+    MemoryPool = DeviceInfo().getHostMemoryPool();
+    break;
+  case TARGET_ALLOC_SHARED:
+    // migratable memory accessible by host and device
+    MemoryPool = DeviceInfo().getHostMemoryPool();
+    break;
+  default:
     REPORT("Invalid target data allocation kind or requested allocator not "
            "implemented yet\n");
     return NULL;
   }
 
-  hsa_amd_memory_pool_t MemoryPool = DeviceInfo().getDeviceMemoryPool(DeviceId);
   hsa_status_t Err = hsa_amd_memory_pool_allocate(MemoryPool, Size, 0, &Ptr);
   DP("Tgt alloc data %ld bytes, (tgt:%016llx).\n", Size,
      (long long unsigned)(Elf64_Addr)Ptr);
@@ -2677,6 +2690,10 @@
 int32_t __tgt_rtl_data_delete(int DeviceId, void *TgtPtr) {
   assert(DeviceId < DeviceInfo().NumberOfDevices && "Device ID too large");
+
+  // TgtPtr is either a TARGET_ALLOC_DEFAULT, TARGET_ALLOC_HOST or
+  // TARGET_ALLOC_SHARED allocation; either way core::Runtime::Memfree can handle it
+
   hsa_status_t Err;
   DP("Tgt free data (tgt:%016llx).\n", (long long unsigned)(Elf64_Addr)TgtPtr);
   Err = core::Runtime::Memfree(TgtPtr);
diff --git a/openmp/libomptarget/test/api/omp_device_managed_memory.c b/openmp/libomptarget/test/api/omp_device_managed_memory.c
--- a/openmp/libomptarget/test/api/omp_device_managed_memory.c
+++ b/openmp/libomptarget/test/api/omp_device_managed_memory.c
@@ -1,5 +1,4 @@
-// RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda
-// REQUIRES: nvptx64-nvidia-cuda
+// RUN: %libomptarget-compile-run-and-check
 
 #include <omp.h>
 #include <stdio.h>
diff --git a/openmp/libomptarget/test/api/omp_device_managed_memory_alloc.c b/openmp/libomptarget/test/api/omp_device_managed_memory_alloc.c
--- a/openmp/libomptarget/test/api/omp_device_managed_memory_alloc.c
+++ b/openmp/libomptarget/test/api/omp_device_managed_memory_alloc.c
@@ -1,5 +1,4 @@
-// RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda
-// REQUIRES: nvptx64-nvidia-cuda
+// RUN: %libomptarget-compile-run-and-check
 
 #include <omp.h>
 #include <stdio.h>
diff --git a/openmp/libomptarget/test/api/omp_host_pinned_memory.c b/openmp/libomptarget/test/api/omp_host_pinned_memory.c
--- a/openmp/libomptarget/test/api/omp_host_pinned_memory.c
+++ b/openmp/libomptarget/test/api/omp_host_pinned_memory.c
@@ -1,5 +1,4 @@
-// RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda
-// REQUIRES: nvptx64-nvidia-cuda
+// RUN: %libomptarget-compile-run-and-check
 
 #include <omp.h>
 #include <stdio.h>
diff --git a/openmp/libomptarget/test/api/omp_host_pinned_memory_alloc.c b/openmp/libomptarget/test/api/omp_host_pinned_memory_alloc.c
--- a/openmp/libomptarget/test/api/omp_host_pinned_memory_alloc.c
+++ b/openmp/libomptarget/test/api/omp_host_pinned_memory_alloc.c
@@ -1,5 +1,4 @@
-// RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda
-// REQUIRES: nvptx64-nvidia-cuda
+// RUN: %libomptarget-compile-run-and-check
 
 #include <omp.h>
 #include <stdio.h>
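
The bodies of the four tests are not included in this diff. As a rough illustration only, the sketch below shows the kind of program such a RUN line would drive against the new TARGET_ALLOC_HOST path: it assumes the llvm_omp_target_alloc_host extension entry point exists in this libomptarget build (declared by hand below) and that omp_target_free on such a pointer reaches __tgt_rtl_data_delete as described in the rtl.cpp comment above; the names N, Dev and HostPinned are invented for the example and do not come from the patch.

#include <omp.h>
#include <stddef.h>
#include <stdio.h>

// Assumed libomptarget extension: allocates non-migratable, host-pinned memory
// that the device can access directly (maps to TARGET_ALLOC_HOST).
void *llvm_omp_target_alloc_host(size_t Size, int DeviceNum);

int main() {
  const int N = 64;
  const int Dev = omp_get_default_device();

  int *HostPinned = (int *)llvm_omp_target_alloc_host(N * sizeof(int), Dev);
  if (!HostPinned) {
    printf("FAIL\n");
    return 1;
  }

  // The pointer is device-accessible as-is, so pass it with is_device_ptr
  // instead of mapping it.
#pragma omp target teams distribute parallel for device(Dev) is_device_ptr(HostPinned)
  for (int i = 0; i < N; ++i)
    HostPinned[i] = i;

  // The same pointer is directly readable from the host.
  int Errors = 0;
  for (int i = 0; i < N; ++i)
    if (HostPinned[i] != i)
      ++Errors;

  omp_target_free(HostPinned, Dev);

  // CHECK: PASS
  printf(Errors ? "FAIL\n" : "PASS\n");
  return Errors;
}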