Index: openmp/libomptarget/test/api/omp_metadirective01.c
===================================================================
--- /dev/null
+++ openmp/libomptarget/test/api/omp_metadirective01.c
@@ -0,0 +1,29 @@
+// RUN: %libomptarget-compile-run-and-check-generic
+
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+void do_work(int *ptr, const int size) {
+  for (int i = 0; i < size; i++)
+    ptr[i] = 1;
+}
+
+int main() {
+  const int n = 1000;
+  const int buf_size = sizeof(int) * n;
+  const int dev = omp_get_default_device();
+  int *ptr = (int *)malloc(buf_size);
+
+  // The host buffer may already be device-accessible, e.g., on a Unified Shared Memory system.
+  const int accessible = ptr != NULL ? 1 : 0; // omp_target_is_accessible(ptr, buf_size, dev);
+#pragma omp metadirective when(user={condition(accessible)}: target firstprivate(ptr)) \
+    default(target map(ptr[:n]))
+  {
+    do_work(ptr, n);
+  }
+
+  free(ptr);
+
+  return 0;
+}
Index: openmp/libomptarget/test/api/omp_metadirective02.c
===================================================================
--- /dev/null
+++ openmp/libomptarget/test/api/omp_metadirective02.c
@@ -0,0 +1,21 @@
+// RUN: %libomptarget-compile-run-and-check-generic
+
+#define N 100
+#include <omp.h>
+#include <stdio.h>
+#include <math.h>
+
+int main() {
+  double d[N];
+
+  float my_pi = 3.1415;
+
+  for (int idev = 0; idev < omp_get_num_devices(); idev++) {
+#pragma omp target device(idev) map(tofrom: d[0:N])
+#pragma omp metadirective when(implementation={vendor(nvidia)}, device={arch("kepler")}: teams num_teams(512) thread_limit(32)) when(implementation={vendor(amd)}, device={arch("fiji")}: teams num_teams(512) thread_limit(64)) default(teams)
+#pragma omp distribute parallel for
+    for (int i = 0; i < N; i++)
+      d[i] = exp((M_PI - my_pi) * i);
+  }
+  return 0;
+}
Index: openmp/libomptarget/test/api/omp_metadirective03.c
===================================================================
--- /dev/null
+++ openmp/libomptarget/test/api/omp_metadirective03.c
@@ -0,0 +1,59 @@
+// RUN: %libomptarget-compile-run-and-check-generic
+
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define N 1048576
+typedef enum Operation {MAX, MIN, NONE} Operation;
+
+void set_monotonic_array(int a[], int size) {
+  for (int i = 0; i < size; i++)
+    a[i] = i;
+}
+
+int test_kernel(Operation op) {
+  int a[N];
+  set_monotonic_array(a, N);
+  int res = a[N/2];
+
+#pragma omp metadirective \
+    when (user = {condition(op == MAX)} : target teams distribute parallel for reduction(max: res)) \
+    when (user = {condition(op == MIN)} : target teams distribute parallel for reduction(min: res)) \
+    otherwise (parallel for private(res))
+  for (int i = 0; i < N; i++) {
+    if (op == MAX)
+      res = (a[i] > res) ? a[i] : res; // Partial max (per-thread)
+    else if (op == MIN)
+      res = (a[i] < res) ? a[i] : res; // Partial min (per-thread)
+    else
+      res = -42; // No-op (assignment to private variable)
+  }
+
+  int expc;
+  if (op == MAX)
+    expc = a[N-1];
+  else if (op == MIN)
+    expc = a[0];
+  else
+    expc = a[N/2];
+
+  if (res != expc) {
+    fprintf(stderr, "Error: Result = %d while expected = %d for operation = %d\n", res, expc, op);
+    return -1;
+  } else {
+    fprintf(stderr, "Pass: Result = expected = %d for operation = %d\n", res, op);
+  }
+  return 0;
+}
+
+int main() {
+  if (test_kernel(MAX) == -1)
+    return -1;
+  if (test_kernel(MIN) == -1)
+    return -1;
+  if (test_kernel(NONE) == -1)
+    return -1;
+
+  return 0;
+}
Index: openmp/libomptarget/test/api/omp_metadirective04.c
===================================================================
--- /dev/null
+++ openmp/libomptarget/test/api/omp_metadirective04.c
@@ -0,0 +1,58 @@
+// RUN: %libomptarget-compile-run-and-check-generic
+
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define N 1048576
+
+void set_monotonic_array(int a[], int size) {
+  for (int i = 0; i < size; i++)
+    a[i] = i;
+}
+
+int test_kernel() {
+  int a[N];
+  set_monotonic_array(a, N);
+  int res = a[N/2];
+
+  omp_interop_t obj = omp_interop_none;
+  int dev = omp_get_default_device();
+
+#pragma omp interop init(targetsync: obj) device(dev)
+  int id = (int)omp_get_interop_int(obj, omp_ipr_fr_id, NULL);
+
+#pragma omp metadirective \
+    when (user = {condition(id == omp_ifr_cuda)} : target teams distribute parallel for reduction(max: res)) \
+    when (user = {condition(id == omp_ifr_hip)} : target teams distribute parallel for reduction(min: res)) \
+    otherwise (parallel for private(res))
+  for (int i = 0; i < N; i++) {
+    if (id == omp_ifr_cuda)
+      res = (a[i] > res) ? a[i] : res; // Partial max (per-thread)
+    else if (id == omp_ifr_hip)
+      res = (a[i] < res) ? a[i] : res; // Partial min (per-thread)
+    else
+      res = -42; // No-op (assignment to private variable)
+  }
+
+  int expc;
+  if (id == omp_ifr_cuda)
+    expc = a[N-1];
+  else if (id == omp_ifr_hip)
+    expc = a[0];
+  else
+    expc = a[N/2];
+
+  if (res != expc) {
+    fprintf(stderr, "Error: Result = %d while expected = %d for omp_get_interop_int() = %d\n", res, expc, id);
+    return -1;
+  } else {
+    fprintf(stderr, "Pass: Result = expected = %d for omp_get_interop_int() = %d\n", res, id);
+  }
+  return 0;
+}
+
+int main() {
+  int status = test_kernel();
+  return status;
+}
Index: openmp/libomptarget/test/api/omp_metadirective05.c
===================================================================
--- /dev/null
+++ openmp/libomptarget/test/api/omp_metadirective05.c
@@ -0,0 +1,94 @@
+// RUN: %libomptarget-compile-run-and-check-generic
+
+
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define N 1048576
+typedef enum Operation {MAX, MIN, NONE} Operation;
+
+void set_monotonic_array(int a[], int size) {
+  for (int i = 0; i < size; i++)
+    a[i] = i;
+}
+
+int test_kernel(Operation op) {
+  int a[N];
+  set_monotonic_array(a, N);
+  int res1 = a[N/2];
+  int res2 = a[N/2];
+
+  omp_interop_t obj = omp_interop_none;
+  int dev = omp_get_default_device();
+
+#pragma omp interop init(targetsync: obj) device(dev)
+  int id = (int)omp_get_interop_int(obj, omp_ipr_fr_id, NULL);
+
+#pragma omp metadirective \
+    when (user = {condition(id == omp_ifr_cuda && op == MAX)} : target teams distribute parallel for reduction(max: res1) reduction(max: res2)) \
+    when (user = {condition(id == omp_ifr_cuda && op == MIN)} : target teams distribute parallel for reduction(max: res1) reduction(min: res2)) \
+    when (user = {condition(id == omp_ifr_cuda && op == NONE)} : target teams distribute parallel for reduction(max: res1) private(res2)) \
+    when (user = {condition(id == omp_ifr_hip && op == MAX)} : target teams distribute parallel for reduction(min: res1) reduction(max: res2)) \
+    when (user = {condition(id == omp_ifr_hip && op == MIN)} : target teams distribute parallel for reduction(min: res1) reduction(min: res2)) \
+    when (user = {condition(id == omp_ifr_hip && op == NONE)} : target teams distribute parallel for reduction(min: res1) private(res2)) \
+    default (parallel for private(res1, res2))
+
+  for (int i = 0; i < N; i++) {
+    if (id == omp_ifr_cuda)
+      res1 = (a[i] > res1) ? a[i] : res1; // Partial max (per-thread)
+    else if (id == omp_ifr_hip)
+      res1 = (a[i] < res1) ? a[i] : res1; // Partial min (per-thread)
+    else
+      res1 = -42; // No-op (assignment to private variable)
+
+    if (op == MAX)
+      res2 = (a[i] > res2) ? a[i] : res2; // Partial max (per-thread)
+    else if (op == MIN)
+      res2 = (a[i] < res2) ? a[i] : res2; // Partial min (per-thread)
+    else
+      res2 = -42; // No-op (assignment to private variable)
+  }
+
+  int expc1;
+  if (id == omp_ifr_cuda)
+    expc1 = a[N-1];
+  else if (id == omp_ifr_hip)
+    expc1 = a[0];
+  else
+    expc1 = a[N/2];
+
+  if (res1 != expc1) {
+    fprintf(stderr, "Error: Result = %d while expected = %d for omp_get_interop_int() = %d\n", res1, expc1, id);
+    return -1;
+  } else {
+    fprintf(stderr, "Pass: Result = expected = %d for omp_get_interop_int() = %d\n", res1, id);
+  }
+
+  int expc2;
+  if (op == MAX)
+    expc2 = a[N-1];
+  else if (op == MIN)
+    expc2 = a[0];
+  else
+    expc2 = a[N/2];
+
+  if (res2 != expc2) {
+    fprintf(stderr, "Error: Result = %d while expected = %d for operation = %d\n", res2, expc2, op);
+    return -1;
+  } else {
+    fprintf(stderr, "Pass: Result = expected = %d for operation = %d\n", res2, op);
+  }
+  return 0;
+}
+
+int main() {
+  if (test_kernel(MAX) == -1)
+    return -1;
+  if (test_kernel(MIN) == -1)
+    return -1;
+  if (test_kernel(NONE) == -1)
+    return -1;
+
+  return 0;
+}
Index: openmp/libomptarget/test/api/omp_metadirective06.c
===================================================================
--- /dev/null
+++ openmp/libomptarget/test/api/omp_metadirective06.c
@@ -0,0 +1,85 @@
+// RUN: %libomptarget-compile-run-and-check-generic
+
+
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define N 1048576
+typedef enum Operation {MAX, MIN, NONE} Operation;
+
+void set_monotonic_array(int a[], int size) {
+  for (int i = 0; i < size; i++)
+    a[i] = i;
+}
+
+int test_kernel(Operation op) {
+  int a[N];
+  set_monotonic_array(a, N);
+  int res = a[N/2];
+
+  omp_interop_t obj;
+  int dev = 0, id = -1;
+  if (op == NONE) {
+    obj = omp_interop_none;
+    dev = omp_get_default_device();
+
+#pragma omp interop init(targetsync: obj) device(dev)
+  }
+
+  if (op == NONE) {
+    id = (int)omp_get_interop_int(obj, omp_ipr_fr_id, NULL);
+  }
+
+#pragma omp metadirective \
+    when (user = {condition(op == MAX || (op == NONE && id == omp_ifr_cuda))} : target teams distribute parallel for reduction(max: res)) \
+    when (user = {condition(op == MIN || (op == NONE && id == omp_ifr_hip))} : target teams distribute parallel for reduction(min: res)) \
+    otherwise (parallel for private(res))
+  for (int i = 0; i < N; i++) {
+    if (op == MAX || (op == NONE && id == omp_ifr_cuda))
+      res = (a[i] > res) ? a[i] : res; // Partial max (per-thread)
+    else if (op == MIN || (op == NONE && id == omp_ifr_hip))
+      res = (a[i] < res) ? a[i] : res; // Partial min (per-thread)
+    else
+      res = -42; // No-op (assignment to private variable)
+  }
+
+  if (op == NONE) {
+#pragma omp interop destroy(obj)
+  }
+
+  int expc;
+  if (op == MAX || (op == NONE && id == omp_ifr_cuda))
+    expc = a[N-1];
+  else if (op == MIN || (op == NONE && id == omp_ifr_hip))
+    expc = a[0];
+  else
+    expc = a[N/2];
+
+  printf("expc = %d res = %d\n", expc, res);
+
+  if (res != expc) {
+    if (op == NONE)
+      fprintf(stderr, "Error: Result = %d while expected = %d for operation = %d and omp_get_interop_int() = %d\n", res, expc, op, id);
+    else
+      fprintf(stderr, "Error: Result = %d while expected = %d for operation = %d\n", res, expc, op);
+    return -1;
+  } else {
+    if (op == NONE)
+      fprintf(stderr, "Pass: Result = expected = %d for operation = %d and omp_get_interop_int() = %d\n", res, op, id);
+    else
+      fprintf(stderr, "Pass: Result = expected = %d for operation = %d\n", res, op);
+  }
+  return 0;
+}
+
+int main() {
+  if (test_kernel(MAX) == -1)
+    return -1;
+  if (test_kernel(MIN) == -1)
+    return -1;
+  if (test_kernel(NONE) == -1)
+    return -1;
+
+  return 0;
+}
Index: openmp/libomptarget/test/api/omp_metadirective07.c
===================================================================
--- /dev/null
+++ openmp/libomptarget/test/api/omp_metadirective07.c
@@ -0,0 +1,60 @@
+// RUN: %libomptarget-compile-run-and-check-generic
+
+
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define N 1048576
+typedef enum Operation {MAX, MIN, NONE} Operation;
+
+void set_monotonic_array(int a[], int size) {
+  for (int i = 0; i < size; i++)
+    a[i] = i;
+}
+
+int test_kernel(Operation op) {
+  int a[N];
+  set_monotonic_array(a, N);
+  int res = a[N/2];
+
+#pragma omp metadirective \
+    when (user = {condition(op == NONE)} : parallel for private(res)) \
+    when (user = {condition(op == MIN)} : target teams distribute parallel for reduction(min: res)) \
+    otherwise (target teams distribute parallel for reduction(max: res))
+  for (int i = 0; i < N; i++) {
+    if (op == MAX)
+      res = (a[i] > res) ? a[i] : res; // Partial max (per-thread)
+    else if (op == MIN)
+      res = (a[i] < res) ? a[i] : res; // Partial min (per-thread)
+    else
+      res = -42; // No-op (assignment to private variable)
+  }
+
+  int expc;
+  if (op == MAX)
+    expc = a[N-1];
+  else if (op == MIN)
+    expc = a[0];
+  else
+    expc = a[N/2];
+
+  if (res != expc) {
+    fprintf(stderr, "Error: Result = %d while expected = %d for operation = %d\n", res, expc, op);
+    return -1;
+  } else {
+    fprintf(stderr, "Pass: Result = expected = %d for operation = %d\n", res, op);
+  }
+  return 0;
+}
+
+int main() {
+  if (test_kernel(MAX) == -1)
+    return -1;
+  if (test_kernel(MIN) == -1)
+    return -1;
+  if (test_kernel(NONE) == -1)
+    return -1;
+
+  return 0;
+}