diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -15590,6 +15590,15 @@ auto Pair = EmitAtomicCompareExchange( LV, RValue::get(OldVal), RValue::get(Ops[2]), E->getExprLoc(), llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true); + // FIXME: + // Unlike C11's atomic_compare_exchange, according to + // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp + // > In either case, the contents of the memory location specified by addr + // > are copied into the memory location specified by old_val_addr. + // But it doesn't specify whether the store to OldValAddr is atomic and + // which memory order to use. + Value *LoadedVal = Pair.first.getScalarVal(); + Builder.CreateStore(LoadedVal, OldValAddr); return Pair.second; } case PPC::BI__builtin_ppc_fetch_and_add: diff --git a/clang/test/CodeGen/builtins-ppc-xlcompat-cas.c b/clang/test/CodeGen/builtins-ppc-xlcompat-cas.c --- a/clang/test/CodeGen/builtins-ppc-xlcompat-cas.c +++ b/clang/test/CodeGen/builtins-ppc-xlcompat-cas.c @@ -19,6 +19,7 @@ // CHECK-NEXT: [[TMP2:%.*]] = cmpxchg weak volatile i32* [[A_ADDR]], i32 [[TMP1]], i32 [[TMP0]] monotonic monotonic, align 4 // CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i1 } [[TMP2]], 0 // CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1 +// CHECK-NEXT: store i32 [[TMP3]], i32* [[B_ADDR]], align 4 // CHECK-NEXT: ret void // void test_builtin_ppc_compare_and_swap(int a, int b, int c) { @@ -39,6 +40,7 @@ // CHECK-NEXT: [[TMP2:%.*]] = cmpxchg weak volatile i64* [[A_ADDR]], i64 [[TMP1]], i64 [[TMP0]] monotonic monotonic, align 8 // CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0 // CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1 +// CHECK-NEXT: store i64 [[TMP3]], i64* [[B_ADDR]], align 8 // CHECK-NEXT: ret void // void test_builtin_ppc_compare_and_swaplp(long a, long b, long c) {