diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -2866,6 +2866,7 @@
 * ``__c11_atomic_fetch_and``
 * ``__c11_atomic_fetch_or``
 * ``__c11_atomic_fetch_xor``
+* ``__c11_atomic_fetch_nand`` (nand is not provided by ``<stdatomic.h>``)
 * ``__c11_atomic_fetch_max``
 * ``__c11_atomic_fetch_min``
diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def
--- a/clang/include/clang/Basic/Builtins.def
+++ b/clang/include/clang/Basic/Builtins.def
@@ -796,6 +796,7 @@
 ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_nand, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_max, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_min, "v.", "t")
 BUILTIN(__c11_atomic_thread_fence, "vi", "n")
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -4695,6 +4695,7 @@
   case AO__c11_atomic_fetch_and:
   case AO__c11_atomic_fetch_or:
   case AO__c11_atomic_fetch_xor:
+  case AO__c11_atomic_fetch_nand:
   case AO__c11_atomic_fetch_max:
   case AO__c11_atomic_fetch_min:
   case AO__atomic_fetch_add:
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -664,6 +664,7 @@
   case AtomicExpr::AO__atomic_nand_fetch:
     PostOp = llvm::Instruction::And; // the NOT is special cased below
     LLVM_FALLTHROUGH;
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__atomic_fetch_nand:
     Op = llvm::AtomicRMWInst::Nand;
     break;
@@ -906,6 +907,7 @@
   case AtomicExpr::AO__c11_atomic_fetch_and:
   case AtomicExpr::AO__c11_atomic_fetch_or:
   case AtomicExpr::AO__c11_atomic_fetch_xor:
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__c11_atomic_fetch_max:
   case AtomicExpr::AO__c11_atomic_fetch_min:
   case AtomicExpr::AO__opencl_atomic_fetch_and:
@@ -972,6 +974,7 @@
   case AtomicExpr::AO__c11_atomic_fetch_or:
   case AtomicExpr::AO__opencl_atomic_fetch_or:
   case AtomicExpr::AO__atomic_fetch_or:
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__atomic_fetch_nand:
   case AtomicExpr::AO__c11_atomic_fetch_sub:
   case AtomicExpr::AO__opencl_atomic_fetch_sub:
@@ -1211,6 +1214,7 @@
   case AtomicExpr::AO__atomic_nand_fetch:
     PostOp = llvm::Instruction::And; // the NOT is special cased below
     LLVM_FALLTHROUGH;
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__atomic_fetch_nand:
     LibCallName = "__atomic_fetch_nand";
     AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -5287,6 +5287,7 @@
   case AtomicExpr::AO__c11_atomic_fetch_and:
   case AtomicExpr::AO__c11_atomic_fetch_or:
   case AtomicExpr::AO__c11_atomic_fetch_xor:
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__opencl_atomic_fetch_and:
   case AtomicExpr::AO__opencl_atomic_fetch_or:
   case AtomicExpr::AO__opencl_atomic_fetch_xor:
diff --git a/clang/test/Sema/atomic-implicit-seq_cst.c b/clang/test/Sema/atomic-implicit-seq_cst.c
--- a/clang/test/Sema/atomic-implicit-seq_cst.c
+++ b/clang/test/Sema/atomic-implicit-seq_cst.c
@@ -178,6 +178,14 @@
   return i | atom; // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
 }
 
+int bad_bitnand_1(int i) {
+  return ~(atom & i); // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
+}
+
+int bad_bitnand_2(int i) {
+  return ~(i & atom); // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
+}
+
 int bad_and_1(int i) {
   return atom && i; // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
 }
@@ -315,6 +323,7 @@
 int good_c11_atomic_fetch_and(int i) { return __c11_atomic_fetch_and(&atom, i, __ATOMIC_RELAXED); }
 int good_c11_atomic_fetch_or(int i) { return __c11_atomic_fetch_or(&atom, i, __ATOMIC_RELAXED); }
 int good_c11_atomic_fetch_xor(int i) { return __c11_atomic_fetch_xor(&atom, i, __ATOMIC_RELAXED); }
+int good_c11_atomic_fetch_nand(int i) { return __c11_atomic_fetch_nand(&atom, i, __ATOMIC_RELAXED); }
 
 void good_cast_to_void(void) { (void)atom; }
 _Atomic(int) * good_address_of(void) { return &atom; }
diff --git a/clang/test/Sema/atomic-ops.c b/clang/test/Sema/atomic-ops.c
--- a/clang/test/Sema/atomic-ops.c
+++ b/clang/test/Sema/atomic-ops.c
@@ -362,6 +362,13 @@
   (void)__c11_atomic_fetch_xor(Ap, val, memory_order_acq_rel);
   (void)__c11_atomic_fetch_xor(Ap, val, memory_order_seq_cst);
 
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_relaxed);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_acquire);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_consume);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_release);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_acq_rel);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_seq_cst);
+
   (void)__c11_atomic_fetch_min(Ap, val, memory_order_relaxed);
   (void)__c11_atomic_fetch_min(Ap, val, memory_order_acquire);
   (void)__c11_atomic_fetch_min(Ap, val, memory_order_consume);
@@ -602,6 +609,8 @@
   (void)__c11_atomic_fetch_or((_Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
   (void)__c11_atomic_fetch_xor((volatile _Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
   (void)__c11_atomic_fetch_xor((_Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
+  (void)__c11_atomic_fetch_nand((volatile _Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
+  (void)__c11_atomic_fetch_nand((_Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
 
   __atomic_store_n((volatile int*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
   __atomic_store_n((int*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
@@ -680,6 +689,8 @@
   (void)__c11_atomic_fetch_or(&ai, 0, memory_order_relaxed);
   (void)__c11_atomic_fetch_xor(&vai, 0, memory_order_relaxed);
   (void)__c11_atomic_fetch_xor(&ai, 0, memory_order_relaxed);
+  (void)__c11_atomic_fetch_nand(&vai, 0, memory_order_relaxed);
+  (void)__c11_atomic_fetch_nand(&ai, 0, memory_order_relaxed);
 
   // Ditto.
   __atomic_store_n(&vi, 0, memory_order_relaxed);
diff --git a/compiler-rt/lib/builtins/atomic.c b/compiler-rt/lib/builtins/atomic.c
--- a/compiler-rt/lib/builtins/atomic.c
+++ b/compiler-rt/lib/builtins/atomic.c
@@ -336,6 +336,18 @@
     return tmp; \
   }
 
+#define ATOMIC_RMW_NAND(n, lockfree, type) \
+  type __atomic_fetch_nand_##n(type *ptr, type val, int model) { \
+    if (lockfree(ptr)) \
+      return __c11_atomic_fetch_nand((_Atomic(type) *)ptr, val, model); \
+    Lock *l = lock_for_pointer(ptr); \
+    lock(l); \
+    type tmp = *ptr; \
+    *ptr = ~(tmp & val); \
+    unlock(l); \
+    return tmp; \
+  }
+
 #define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
@@ -351,3 +363,6 @@
 #define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW_NAND(n, lockfree, type)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
diff --git a/compiler-rt/test/builtins/Unit/atomic_test.c b/compiler-rt/test/builtins/Unit/atomic_test.c
--- a/compiler-rt/test/builtins/Unit/atomic_test.c
+++ b/compiler-rt/test/builtins/Unit/atomic_test.c
@@ -96,6 +96,11 @@
 uint32_t __atomic_fetch_xor_4(uint32_t *ptr, uint32_t val, int model);
 uint64_t __atomic_fetch_xor_8(uint64_t *ptr, uint64_t val, int model);
 
+uint8_t __atomic_fetch_nand_1(uint8_t *ptr, uint8_t val, int model);
+uint16_t __atomic_fetch_nand_2(uint16_t *ptr, uint16_t val, int model);
+uint32_t __atomic_fetch_nand_4(uint32_t *ptr, uint32_t val, int model);
+uint64_t __atomic_fetch_nand_8(uint64_t *ptr, uint64_t val, int model);
+
 // We conditionally test the *_16 atomic function variants based on the same
 // condition that compiler_rt (atomic.c) uses to conditionally generate them.
 // Currently atomic.c tests if __SIZEOF_INT128__ is defined (which can be the
@@ -119,6 +124,7 @@
 uint128_t __atomic_fetch_and_16(uint128_t *ptr, uint128_t val, int model);
 uint128_t __atomic_fetch_or_16(uint128_t *ptr, uint128_t val, int model);
 uint128_t __atomic_fetch_xor_16(uint128_t *ptr, uint128_t val, int model);
+uint128_t __atomic_fetch_nand_16(uint128_t *ptr, uint128_t val, int model);
 #else
 typedef uint64_t maxuint_t;
 #endif
@@ -540,6 +546,28 @@
       abort();
 #endif
 
+    // Fetch nand.
+
+    set_a_values(V + m);
+    set_b_values(0);
+    b8 = __atomic_fetch_nand_1(&a8, U8(ONES), model);
+    if (b8 != U8(V + m) || a8 != U8(~((V + m) & ONES)))
+      abort();
+    b16 = __atomic_fetch_nand_2(&a16, U16(ONES), model);
+    if (b16 != U16(V + m) || a16 != U16(~((V + m) & ONES)))
+      abort();
+    b32 = __atomic_fetch_nand_4(&a32, U32(ONES), model);
+    if (b32 != U32(V + m) || a32 != U32(~((V + m) & ONES)))
+      abort();
+    b64 = __atomic_fetch_nand_8(&a64, U64(ONES), model);
+    if (b64 != U64(V + m) || a64 != U64(~((V + m) & ONES)))
+      abort();
+#ifdef TEST_16
+    b128 = __atomic_fetch_nand_16(&a128, ONES, model);
+    if (b128 != (V + m) || a128 != ~((V + m) & ONES))
+      abort();
+#endif
+
     // Check signed integer overflow behavior
     set_a_values(V + m);
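
For reference, the semantics the patch implements: __c11_atomic_fetch_nand
atomically stores ~(old & val) into the object and returns the old value,
matching the GNU __atomic_fetch_nand builtin that Clang already supports. A
minimal usage sketch, not part of the patch, assuming a Clang built with this
change:

// Sketch only: demonstrates the fetch-nand semantics added above.
#include <stdio.h>

int main(void) {
  _Atomic(unsigned) a = 0xF0F0F0F0u;
  // Atomically: old = a; a = ~(old & 0xFFFF0000); the old value is returned.
  unsigned old = __c11_atomic_fetch_nand(&a, 0xFFFF0000u, __ATOMIC_RELAXED);
  printf("old=%08x new=%08x\n", old, a); // old=f0f0f0f0 new=0f0fffff
  return 0;
}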
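
The CGAtomic.cpp hunks fold the new operation into the existing nand handling.
Note the contrast with __atomic_nand_fetch in the same switch: fetch_nand
returns the value before the update, while nand_fetch returns the value after
it, which is why that case sets PostOp to recompute (and then negate) the
result. A short sketch of the difference, with illustrative values, again
assuming a patched Clang:

// Sketch only: contrasts *_fetch_nand (returns old) with *_nand_fetch
// (returns new). Both store ~(old & val).
#include <assert.h>

int main(void) {
  _Atomic(unsigned char) a = 0xFF;
  unsigned char before = __c11_atomic_fetch_nand(&a, 0x0F, __ATOMIC_SEQ_CST);
  assert(before == 0xFF);                     // value prior to the update
  assert(a == (unsigned char)~(0xFF & 0x0F)); // stored value: 0xF0

  unsigned char b = 0xFF;
  unsigned char after = __atomic_nand_fetch(&b, 0x0F, __ATOMIC_SEQ_CST);
  assert(after == (unsigned char)0xF0);       // value after the update
  return 0;
}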