Index: llvm/test/CodeGen/PowerPC/atomics-regression.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/PowerPC/atomics-regression.ll @@ -0,0 +1,9546 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=powerpc64le-linux-gnu < %s | FileCheck %s -check-prefix=PPC64LE + +define i8 @test0(i8* %ptr) { +; PPC64LE-LABEL: test0: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lbz 3, 0(3) +; PPC64LE-NEXT: blr + %val = load atomic i8, i8* %ptr unordered, align 1 + ret i8 %val +} + +define i8 @test1(i8* %ptr) { +; PPC64LE-LABEL: test1: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lbz 3, 0(3) +; PPC64LE-NEXT: blr + %val = load atomic i8, i8* %ptr monotonic, align 1 + ret i8 %val +} + +define i8 @test2(i8* %ptr) { +; PPC64LE-LABEL: test2: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lbz 3, 0(3) +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %val = load atomic i8, i8* %ptr acquire, align 1 + ret i8 %val +} + +define i8 @test3(i8* %ptr) { +; PPC64LE-LABEL: test3: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: ori 2, 2, 0 +; PPC64LE-NEXT: lbz 3, 0(3) +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %val = load atomic i8, i8* %ptr seq_cst, align 1 + ret i8 %val +} + +define i16 @test4(i16* %ptr) { +; PPC64LE-LABEL: test4: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lhz 3, 0(3) +; PPC64LE-NEXT: blr + %val = load atomic i16, i16* %ptr unordered, align 2 + ret i16 %val +} + +define i16 @test5(i16* %ptr) { +; PPC64LE-LABEL: test5: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lhz 3, 0(3) +; PPC64LE-NEXT: blr + %val = load atomic i16, i16* %ptr monotonic, align 2 + ret i16 %val +} + +define i16 @test6(i16* %ptr) { +; PPC64LE-LABEL: test6: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lhz 3, 0(3) +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %val = load atomic i16, i16* %ptr acquire, align 2 + ret i16 %val +} + +define i16 @test7(i16* %ptr) { +; PPC64LE-LABEL: test7: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: ori 2, 2, 0 +; PPC64LE-NEXT: lhz 3, 0(3) +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %val = load atomic i16, i16* %ptr seq_cst, align 2 + ret i16 %val +} + +define i32 @test8(i32* %ptr) { +; PPC64LE-LABEL: test8: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwz 3, 0(3) +; PPC64LE-NEXT: blr + %val = load atomic i32, i32* %ptr unordered, align 4 + ret i32 %val +} + +define i32 @test9(i32* %ptr) { +; PPC64LE-LABEL: test9: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwz 3, 0(3) +; PPC64LE-NEXT: blr + %val = load atomic i32, i32* %ptr monotonic, align 4 + ret i32 %val +} + +define i32 @test10(i32* %ptr) { +; PPC64LE-LABEL: test10: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwz 3, 0(3) +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %val = load atomic i32, i32* %ptr acquire, align 4 + ret i32 %val +} + +define i32 @test11(i32* %ptr) { +; PPC64LE-LABEL: test11: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: ori 2, 2, 0 +; PPC64LE-NEXT: lwz 3, 0(3) +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %val = load atomic i32, i32* %ptr seq_cst, align 4 + ret i32 %val +} + +define i64 @test12(i64* %ptr) { +; PPC64LE-LABEL: test12: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: ld 3, 0(3) +; PPC64LE-NEXT: blr + %val = load atomic i64, i64* %ptr unordered, align 8 + ret i64 %val +} + +define i64 @test13(i64* %ptr) { +; PPC64LE-LABEL: test13: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: ld 3, 0(3) +; PPC64LE-NEXT: blr + %val = load atomic i64, i64* %ptr monotonic, align 8 + ret i64 %val +} + +define i64 @test14(i64* %ptr) { +; PPC64LE-LABEL: test14: +; PPC64LE: # BB#0: +; 
PPC64LE-NEXT: ld 3, 0(3) +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %val = load atomic i64, i64* %ptr acquire, align 8 + ret i64 %val +} + +define i64 @test15(i64* %ptr) { +; PPC64LE-LABEL: test15: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: ori 2, 2, 0 +; PPC64LE-NEXT: ld 3, 0(3) +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %val = load atomic i64, i64* %ptr seq_cst, align 8 + ret i64 %val +} + +define void @test16(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test16: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: stb 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i8 %val, i8* %ptr unordered, align 1 + ret void +} + +define void @test17(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test17: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: stb 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i8 %val, i8* %ptr monotonic, align 1 + ret void +} + +define void @test18(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test18: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: stb 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i8 %val, i8* %ptr release, align 1 + ret void +} + +define void @test19(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test19: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: stb 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i8 %val, i8* %ptr seq_cst, align 1 + ret void +} + +define void @test20(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test20: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sth 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i16 %val, i16* %ptr unordered, align 2 + ret void +} + +define void @test21(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test21: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sth 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i16 %val, i16* %ptr monotonic, align 2 + ret void +} + +define void @test22(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test22: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: sth 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i16 %val, i16* %ptr release, align 2 + ret void +} + +define void @test23(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test23: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: sth 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i16 %val, i16* %ptr seq_cst, align 2 + ret void +} + +define void @test24(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test24: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: stw 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i32 %val, i32* %ptr unordered, align 4 + ret void +} + +define void @test25(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test25: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: stw 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i32 %val, i32* %ptr monotonic, align 4 + ret void +} + +define void @test26(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test26: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: stw 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i32 %val, i32* %ptr release, align 4 + ret void +} + +define void @test27(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test27: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: stw 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i32 %val, i32* %ptr seq_cst, align 4 + ret void +} + +define void @test28(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test28: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: std 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i64 %val, i64* %ptr unordered, align 8 + ret void +} + +define void @test29(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test29: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: std 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i64 %val, i64* %ptr monotonic, align 8 + ret void +} + +define void @test30(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test30: +; PPC64LE: # 
BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: std 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i64 %val, i64* %ptr release, align 8 + ret void +} + +define void @test31(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test31: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: std 4, 0(3) +; PPC64LE-NEXT: blr + store atomic i64 %val, i64* %ptr seq_cst, align 8 + ret void +} + +define void @test32() { +; PPC64LE-LABEL: test32: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + fence acquire + ret void +} + +define void @test33() { +; PPC64LE-LABEL: test33: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + fence release + ret void +} + +define void @test34() { +; PPC64LE-LABEL: test34: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + fence acq_rel + ret void +} + +define void @test35() { +; PPC64LE-LABEL: test35: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: blr + fence seq_cst + ret void +} + +define void @test36() { +; PPC64LE-LABEL: test36: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + fence singlethread acquire + ret void +} + +define void @test37() { +; PPC64LE-LABEL: test37: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + fence singlethread release + ret void +} + +define void @test38() { +; PPC64LE-LABEL: test38: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + fence singlethread acq_rel + ret void +} + +define void @test39() { +; PPC64LE-LABEL: test39: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: blr + fence singlethread seq_cst + ret void +} + +define void @test40(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test40: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: b .LBB40_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB40_1: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB40_2 +; PPC64LE-NEXT: .LBB40_2: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB40_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic + ret void +} + +define void @test41(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test41: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB41_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB41_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB41_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB41_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic + ret void +} + +define void @test42(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test42: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB42_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB42_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB42_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB42_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire + ret void +} + +define void @test43(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test43: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB43_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB43_1: +; PPC64LE-NEXT: stbcx. 
5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB43_2 +; PPC64LE-NEXT: .LBB43_2: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB43_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic + ret void +} + +define void @test44(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test44: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB44_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB44_1: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB44_2 +; PPC64LE-NEXT: .LBB44_2: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB44_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire + ret void +} + +define void @test45(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test45: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB45_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB45_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB45_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB45_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic + ret void +} + +define void @test46(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test46: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB46_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB46_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB46_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB46_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire + ret void +} + +define void @test47(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test47: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB47_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB47_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB47_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB47_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic + ret void +} + +define void @test48(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test48: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB48_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB48_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB48_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB48_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire + ret void +} + +define void @test49(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test49: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB49_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB49_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 
5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB49_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB49_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst + ret void +} + +define void @test50(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test50: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: b .LBB50_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB50_1: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB50_2 +; PPC64LE-NEXT: .LBB50_2: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB50_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic + ret void +} + +define void @test51(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test51: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB51_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB51_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB51_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB51_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic + ret void +} + +define void @test52(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test52: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB52_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB52_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB52_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB52_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire + ret void +} + +define void @test53(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test53: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB53_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB53_1: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB53_2 +; PPC64LE-NEXT: .LBB53_2: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB53_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic + ret void +} + +define void @test54(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test54: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB54_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB54_1: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB54_2 +; PPC64LE-NEXT: .LBB54_2: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB54_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire + ret void +} + +define void @test55(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test55: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB55_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB55_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB55_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB55_4: +; PPC64LE-NEXT: sthcx. 
6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic + ret void +} + +define void @test56(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test56: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB56_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB56_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB56_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB56_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire + ret void +} + +define void @test57(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test57: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB57_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB57_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB57_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB57_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic + ret void +} + +define void @test58(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test58: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB58_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB58_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB58_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB58_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire + ret void +} + +define void @test59(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test59: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB59_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB59_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB59_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB59_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst + ret void +} + +define void @test60(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test60: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: b .LBB60_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB60_1: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB60_2 +; PPC64LE-NEXT: .LBB60_2: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB60_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic + ret void +} + +define void @test61(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test61: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB61_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB61_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB61_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB61_4: +; PPC64LE-NEXT: stwcx. 
6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic + ret void +} + +define void @test62(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test62: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB62_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB62_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB62_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB62_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire + ret void +} + +define void @test63(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test63: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB63_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB63_1: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB63_2 +; PPC64LE-NEXT: .LBB63_2: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB63_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic + ret void +} + +define void @test64(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test64: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB64_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB64_1: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB64_2 +; PPC64LE-NEXT: .LBB64_2: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB64_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire + ret void +} + +define void @test65(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test65: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB65_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB65_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB65_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB65_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic + ret void +} + +define void @test66(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test66: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB66_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB66_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB66_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB66_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire + ret void +} + +define void @test67(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test67: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB67_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB67_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB67_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB67_4: +; PPC64LE-NEXT: stwcx. 
6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic + ret void +} + +define void @test68(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test68: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB68_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB68_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB68_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB68_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire + ret void +} + +define void @test69(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test69: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB69_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB69_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB69_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB69_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst + ret void +} + +define void @test70(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test70: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: b .LBB70_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB70_1: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB70_2 +; PPC64LE-NEXT: .LBB70_2: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: beq 0, .LBB70_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic + ret void +} + +define void @test71(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test71: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB71_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB71_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB71_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB71_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic + ret void +} + +define void @test72(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test72: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB72_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB72_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB72_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB72_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire + ret void +} + +define void @test73(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test73: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB73_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB73_1: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB73_2 +; PPC64LE-NEXT: .LBB73_2: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: beq 0, .LBB73_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stdcx. 
6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic + ret void +} + +define void @test74(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test74: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB74_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB74_1: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB74_2 +; PPC64LE-NEXT: .LBB74_2: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: beq 0, .LBB74_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire + ret void +} + +define void @test75(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test75: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB75_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB75_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB75_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB75_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic + ret void +} + +define void @test76(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test76: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB76_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB76_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB76_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB76_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire + ret void +} + +define void @test77(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test77: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB77_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB77_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB77_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB77_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic + ret void +} + +define void @test78(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test78: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB78_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB78_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB78_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB78_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire + ret void +} + +define void @test79(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test79: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB79_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB79_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB79_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB79_4: +; PPC64LE-NEXT: stdcx. 
6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst + ret void +} + +define void @test80(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test80: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: b .LBB80_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB80_1: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB80_2 +; PPC64LE-NEXT: .LBB80_2: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB80_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread monotonic monotonic + ret void +} + +define void @test81(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test81: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB81_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB81_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB81_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB81_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire monotonic + ret void +} + +define void @test82(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test82: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB82_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB82_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB82_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB82_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire acquire + ret void +} + +define void @test83(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test83: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB83_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB83_1: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB83_2 +; PPC64LE-NEXT: .LBB83_2: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB83_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release monotonic + ret void +} + +define void @test84(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test84: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB84_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB84_1: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB84_2 +; PPC64LE-NEXT: .LBB84_2: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB84_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release acquire + ret void +} + +define void @test85(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test85: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB85_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB85_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB85_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB85_4: +; PPC64LE-NEXT: stbcx. 
6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel monotonic + ret void +} + +define void @test86(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test86: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB86_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB86_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB86_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB86_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel acquire + ret void +} + +define void @test87(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test87: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB87_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB87_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB87_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB87_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst monotonic + ret void +} + +define void @test88(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test88: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB88_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB88_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB88_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB88_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst acquire + ret void +} + +define void @test89(i8* %ptr, i8 %cmp, i8 %val) { +; PPC64LE-LABEL: test89: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB89_1: +; PPC64LE-NEXT: lbarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB89_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB89_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB89_4: +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst seq_cst + ret void +} + +define void @test90(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test90: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: b .LBB90_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB90_1: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB90_2 +; PPC64LE-NEXT: .LBB90_2: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB90_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread monotonic monotonic + ret void +} + +define void @test91(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test91: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB91_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB91_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB91_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB91_4: +; PPC64LE-NEXT: sthcx. 
6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire monotonic + ret void +} + +define void @test92(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test92: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB92_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB92_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB92_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB92_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire acquire + ret void +} + +define void @test93(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test93: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB93_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB93_1: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB93_2 +; PPC64LE-NEXT: .LBB93_2: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB93_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release monotonic + ret void +} + +define void @test94(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test94: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB94_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB94_1: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB94_2 +; PPC64LE-NEXT: .LBB94_2: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB94_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release acquire + ret void +} + +define void @test95(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test95: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB95_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB95_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB95_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB95_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel monotonic + ret void +} + +define void @test96(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test96: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB96_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB96_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB96_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB96_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel acquire + ret void +} + +define void @test97(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test97: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB97_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB97_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB97_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB97_4: +; PPC64LE-NEXT: sthcx. 
6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst monotonic + ret void +} + +define void @test98(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test98: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB98_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB98_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB98_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB98_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst acquire + ret void +} + +define void @test99(i16* %ptr, i16 %cmp, i16 %val) { +; PPC64LE-LABEL: test99: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB99_1: +; PPC64LE-NEXT: lharx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB99_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB99_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB99_4: +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst seq_cst + ret void +} + +define void @test100(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test100: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: b .LBB100_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB100_1: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB100_2 +; PPC64LE-NEXT: .LBB100_2: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB100_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread monotonic monotonic + ret void +} + +define void @test101(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test101: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB101_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB101_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB101_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB101_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire monotonic + ret void +} + +define void @test102(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test102: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB102_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB102_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB102_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB102_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire acquire + ret void +} + +define void @test103(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test103: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB103_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB103_1: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB103_2 +; PPC64LE-NEXT: .LBB103_2: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB103_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stwcx. 
6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release monotonic + ret void +} + +define void @test104(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test104: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB104_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB104_1: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB104_2 +; PPC64LE-NEXT: .LBB104_2: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: beq 0, .LBB104_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release acquire + ret void +} + +define void @test105(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test105: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB105_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB105_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB105_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB105_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel monotonic + ret void +} + +define void @test106(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test106: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB106_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB106_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB106_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB106_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel acquire + ret void +} + +define void @test107(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test107: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB107_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB107_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB107_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB107_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst monotonic + ret void +} + +define void @test108(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test108: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB108_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB108_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB108_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB108_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst acquire + ret void +} + +define void @test109(i32* %ptr, i32 %cmp, i32 %val) { +; PPC64LE-LABEL: test109: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB109_1: +; PPC64LE-NEXT: lwarx 6, 0, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bne 0, .LBB109_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 
5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB109_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB109_4: +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst seq_cst + ret void +} + +define void @test110(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test110: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: b .LBB110_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB110_1: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB110_2 +; PPC64LE-NEXT: .LBB110_2: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: beq 0, .LBB110_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread monotonic monotonic + ret void +} + +define void @test111(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test111: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB111_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB111_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB111_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB111_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire monotonic + ret void +} + +define void @test112(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test112: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB112_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB112_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB112_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB112_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire acquire + ret void +} + +define void @test113(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test113: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB113_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB113_1: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB113_2 +; PPC64LE-NEXT: .LBB113_2: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: beq 0, .LBB113_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release monotonic + ret void +} + +define void @test114(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test114: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: b .LBB114_2 +; PPC64LE-NEXT: .p2align 5 +; PPC64LE-NEXT: .LBB114_1: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: beqlr 0 +; PPC64LE-NEXT: b .LBB114_2 +; PPC64LE-NEXT: .LBB114_2: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: beq 0, .LBB114_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release acquire + ret void +} + +define void @test115(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test115: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB115_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB115_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 
5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB115_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB115_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel monotonic + ret void +} + +define void @test116(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test116: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB116_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB116_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB116_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB116_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel acquire + ret void +} + +define void @test117(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test117: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB117_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB117_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB117_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB117_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst monotonic + ret void +} + +define void @test118(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test118: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB118_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB118_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB118_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB118_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst acquire + ret void +} + +define void @test119(i64* %ptr, i64 %cmp, i64 %val) { +; PPC64LE-LABEL: test119: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB119_1: +; PPC64LE-NEXT: ldarx 6, 0, 3 +; PPC64LE-NEXT: cmpd 4, 6 +; PPC64LE-NEXT: bne 0, .LBB119_4 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 5, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB119_1 +; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr +; PPC64LE-NEXT: .LBB119_4: +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst seq_cst + ret void +} + +define i8 @test120(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test120: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB120_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB120_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val monotonic + ret i8 %ret +} + +define i8 @test121(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test121: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB121_1: +; PPC64LE-NEXT: lbarx 3, 0, 5 +; PPC64LE-NEXT: stbcx. 
4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB121_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val acquire + ret i8 %ret +} + +define i8 @test122(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test122: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB122_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB122_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val release + ret i8 %ret +} + +define i8 @test123(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test123: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB123_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB123_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val acq_rel + ret i8 %ret +} + +define i8 @test124(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test124: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB124_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB124_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val seq_cst + ret i8 %ret +} + +define i16 @test125(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test125: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB125_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB125_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val monotonic + ret i16 %ret +} + +define i16 @test126(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test126: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB126_1: +; PPC64LE-NEXT: lharx 3, 0, 5 +; PPC64LE-NEXT: sthcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB126_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val acquire + ret i16 %ret +} + +define i16 @test127(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test127: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB127_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB127_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val release + ret i16 %ret +} + +define i16 @test128(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test128: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB128_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB128_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val acq_rel + ret i16 %ret +} + +define i16 @test129(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test129: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB129_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB129_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val seq_cst + ret i16 %ret +} + +define i32 @test130(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test130: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB130_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: stwcx. 
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB130_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test131(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test131:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB131_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB131_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test132(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test132:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB132_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB132_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test133(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test133:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB133_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB133_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test134(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test134:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB134_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB134_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test135(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test135:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB135_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB135_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test136(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test136:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB136_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB136_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test137(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test137:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB137_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB137_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test138(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test138:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB138_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB138_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test139(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test139:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB139_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB139_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test140(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test140:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB140_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB140_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test141(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test141:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB141_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB141_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test142(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test142:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB142_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB142_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test143(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test143:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB143_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB143_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test144(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test144:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB144_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB144_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test145(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test145:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB145_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB145_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test146(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test146:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB146_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB146_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test147(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test147:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB147_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB147_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test148(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test148:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB148_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB148_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test149(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test149:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB149_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB149_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test150(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test150:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB150_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB150_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test151(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test151:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB151_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB151_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test152(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test152:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB152_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB152_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test153(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test153:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB153_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB153_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test154(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test154:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB154_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB154_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test155(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test155:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB155_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB155_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test156(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test156:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB156_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB156_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test157(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test157:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB157_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB157_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test158(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test158:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB158_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB158_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test159(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test159:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB159_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB159_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test160(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test160:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB160_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB160_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test161(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test161:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB161_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB161_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test162(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test162:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB162_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB162_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test163(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test163:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB163_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB163_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test164(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test164:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB164_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB164_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test165(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test165:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB165_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB165_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test166(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test166:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB166_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB166_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test167(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test167:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB167_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB167_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test168(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test168:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB168_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB168_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test169(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test169:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB169_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB169_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test170(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test170:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB170_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB170_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test171(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test171:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB171_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB171_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test172(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test172:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB172_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB172_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test173(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test173:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB173_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB173_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test174(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test174:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB174_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB174_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test175(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test175:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB175_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB175_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test176(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test176:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB176_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: sub 6, 3, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB176_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test177(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test177:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB177_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB177_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test178(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test178:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB178_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB178_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test179(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test179:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB179_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB179_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test180(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test180:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB180_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB180_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test181(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test181:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB181_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB181_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test182(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test182:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB182_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB182_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test183(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test183:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB183_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB183_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test184(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test184:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB184_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB184_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test185(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test185:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB185_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB185_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test186(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test186:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB186_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB186_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test187(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test187:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB187_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB187_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test188(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test188:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB188_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB188_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test189(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test189:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB189_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB189_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test190(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test190:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB190_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB190_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test191(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test191:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB191_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB191_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test192(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test192:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB192_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB192_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test193(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test193:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB193_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB193_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test194(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test194:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB194_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB194_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test195(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test195:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB195_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB195_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test196(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test196:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB196_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB196_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test197(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test197:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB197_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB197_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test198(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test198:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB198_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB198_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test199(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test199:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB199_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB199_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test200(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test200:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB200_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB200_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test201(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test201:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB201_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB201_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test202(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test202:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB202_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB202_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test203(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test203:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB203_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB203_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test204(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test204:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB204_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB204_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test205(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test205:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB205_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB205_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test206(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test206:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB206_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB206_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test207(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test207:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB207_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB207_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test208(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test208:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB208_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB208_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test209(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test209:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB209_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB209_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test210(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test210:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB210_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB210_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test211(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test211:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB211_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB211_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test212(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test212:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB212_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB212_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test213(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test213:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB213_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB213_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test214(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test214:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB214_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB214_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test215(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test215:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB215_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB215_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test216(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test216:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB216_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB216_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test217(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test217:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB217_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB217_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test218(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test218:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB218_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB218_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test219(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test219:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB219_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB219_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test220(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test220:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB220_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB220_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test221(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test221:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB221_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB221_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test222(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test222:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB222_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB222_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test223(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test223:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB223_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB223_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test224(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test224:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB224_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB224_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test225(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test225:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB225_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB225_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test226(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test226:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB226_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB226_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test227(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test227:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB227_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB227_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test228(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test228:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB228_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB228_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test229(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test229:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB229_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB229_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test230(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test230:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB230_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB230_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test231(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test231:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB231_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB231_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test232(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test232:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB232_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB232_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test233(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test233:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB233_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB233_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test234(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test234:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB234_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB234_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test235(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test235:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB235_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB235_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test236(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test236:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB236_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB236_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test237(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test237:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB237_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB237_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test238(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test238:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB238_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB238_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test239(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test239:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB239_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB239_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test240(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test240:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB240_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB240_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test241(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test241:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB241_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB241_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test242(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test242:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB242_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB242_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test243(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test243:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB243_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB243_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test244(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test244:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB244_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB244_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test245(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test245:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB245_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB245_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test246(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test246:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB246_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB246_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test247(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test247:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB247_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB247_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test248(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test248:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB248_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB248_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test249(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test249:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB249_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB249_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test250(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test250:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB250_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB250_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test251(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test251:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB251_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB251_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test252(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test252:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB252_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB252_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test253(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test253:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB253_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB253_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test254(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test254:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB254_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB254_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test255(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test255:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB255_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB255_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test256(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test256:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB256_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB256_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test257(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test257:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB257_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB257_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test258(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test258:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB258_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB258_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test259(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test259:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB259_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB259_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test260(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test260:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB260_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB260_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB260_1
+; PPC64LE-NEXT: .LBB260_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test261(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test261:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB261_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB261_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB261_1
+; PPC64LE-NEXT: .LBB261_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test262(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test262:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB262_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB262_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB262_1
+; PPC64LE-NEXT: .LBB262_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test263(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test263:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB263_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB263_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB263_1
+; PPC64LE-NEXT: .LBB263_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test264(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test264:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB264_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB264_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB264_1
+; PPC64LE-NEXT: .LBB264_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test265(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test265:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB265_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB265_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB265_1
+; PPC64LE-NEXT: .LBB265_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test266(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test266:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB266_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB266_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB266_1
+; PPC64LE-NEXT: .LBB266_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test267(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test267:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB267_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB267_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB267_1
+; PPC64LE-NEXT: .LBB267_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test268(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test268:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB268_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB268_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB268_1
+; PPC64LE-NEXT: .LBB268_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test269(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test269:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB269_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB269_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB269_1
+; PPC64LE-NEXT: .LBB269_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test270(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test270:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB270_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB270_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB270_1
+; PPC64LE-NEXT: .LBB270_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test271(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test271:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB271_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB271_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB271_1
+; PPC64LE-NEXT: .LBB271_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test272(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test272:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB272_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB272_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB272_1
+; PPC64LE-NEXT: .LBB272_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test273(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test273:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB273_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB273_3
+; PPC64LE-NEXT: # BB#2:
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB273_1 +; PPC64LE-NEXT: .LBB273_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw max i32* %ptr, i32 %val acq_rel + ret i32 %ret +} + +define i32 @test274(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test274: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB274_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmpw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB274_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB274_1 +; PPC64LE-NEXT: .LBB274_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw max i32* %ptr, i32 %val seq_cst + ret i32 %ret +} + +define i64 @test275(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test275: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB275_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpd 4, 5 +; PPC64LE-NEXT: ble 0, .LBB275_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB275_1 +; PPC64LE-NEXT: .LBB275_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw max i64* %ptr, i64 %val monotonic + ret i64 %ret +} + +define i64 @test276(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test276: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB276_1: +; PPC64LE-NEXT: ldarx 3, 0, 5 +; PPC64LE-NEXT: cmpd 4, 3 +; PPC64LE-NEXT: ble 0, .LBB276_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB276_1 +; PPC64LE-NEXT: .LBB276_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw max i64* %ptr, i64 %val acquire + ret i64 %ret +} + +define i64 @test277(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test277: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB277_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpd 4, 5 +; PPC64LE-NEXT: ble 0, .LBB277_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB277_1 +; PPC64LE-NEXT: .LBB277_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw max i64* %ptr, i64 %val release + ret i64 %ret +} + +define i64 @test278(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test278: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB278_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpd 4, 5 +; PPC64LE-NEXT: ble 0, .LBB278_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB278_1 +; PPC64LE-NEXT: .LBB278_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw max i64* %ptr, i64 %val acq_rel + ret i64 %ret +} + +define i64 @test279(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test279: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB279_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpd 4, 5 +; PPC64LE-NEXT: ble 0, .LBB279_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB279_1 +; PPC64LE-NEXT: .LBB279_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw max i64* %ptr, i64 %val seq_cst + ret i64 %ret +} + +define i8 @test280(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test280: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB280_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: extsb 6, 5 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB280_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB280_1 +; PPC64LE-NEXT: .LBB280_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw min i8* %ptr, i8 %val monotonic + ret i8 %ret +} + +define i8 @test281(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test281: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB281_1: +; PPC64LE-NEXT: lbarx 3, 0, 5 +; PPC64LE-NEXT: extsb 6, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB281_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB281_1 +; PPC64LE-NEXT: .LBB281_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i8* %ptr, i8 %val acquire + ret i8 %ret +} + +define i8 @test282(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test282: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB282_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: extsb 6, 5 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB282_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB282_1 +; PPC64LE-NEXT: .LBB282_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw min i8* %ptr, i8 %val release + ret i8 %ret +} + +define i8 @test283(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test283: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB283_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: extsb 6, 5 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB283_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB283_1 +; PPC64LE-NEXT: .LBB283_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i8* %ptr, i8 %val acq_rel + ret i8 %ret +} + +define i8 @test284(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test284: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB284_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: extsb 6, 5 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB284_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB284_1 +; PPC64LE-NEXT: .LBB284_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i8* %ptr, i8 %val seq_cst + ret i8 %ret +} + +define i16 @test285(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test285: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB285_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: extsh 6, 5 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB285_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB285_1 +; PPC64LE-NEXT: .LBB285_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw min i16* %ptr, i16 %val monotonic + ret i16 %ret +} + +define i16 @test286(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test286: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB286_1: +; PPC64LE-NEXT: lharx 3, 0, 5 +; PPC64LE-NEXT: extsh 6, 3 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB286_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB286_1 +; PPC64LE-NEXT: .LBB286_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i16* %ptr, i16 %val acquire + ret i16 %ret +} + +define i16 @test287(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test287: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB287_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: extsh 6, 5 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB287_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB287_1 +; PPC64LE-NEXT: .LBB287_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw min i16* %ptr, i16 %val release + ret i16 %ret +} + +define i16 @test288(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test288: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB288_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: extsh 6, 5 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB288_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB288_1 +; PPC64LE-NEXT: .LBB288_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i16* %ptr, i16 %val acq_rel + ret i16 %ret +} + +define i16 @test289(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test289: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB289_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: extsh 6, 5 +; PPC64LE-NEXT: cmpw 4, 6 +; PPC64LE-NEXT: bge 0, .LBB289_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB289_1 +; PPC64LE-NEXT: .LBB289_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i16* %ptr, i16 %val seq_cst + ret i16 %ret +} + +define i32 @test290(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test290: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB290_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmpw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB290_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB290_1 +; PPC64LE-NEXT: .LBB290_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw min i32* %ptr, i32 %val monotonic + ret i32 %ret +} + +define i32 @test291(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test291: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB291_1: +; PPC64LE-NEXT: lwarx 3, 0, 5 +; PPC64LE-NEXT: cmpw 4, 3 +; PPC64LE-NEXT: bge 0, .LBB291_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB291_1 +; PPC64LE-NEXT: .LBB291_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i32* %ptr, i32 %val acquire + ret i32 %ret +} + +define i32 @test292(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test292: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB292_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmpw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB292_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB292_1 +; PPC64LE-NEXT: .LBB292_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw min i32* %ptr, i32 %val release + ret i32 %ret +} + +define i32 @test293(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test293: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB293_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmpw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB293_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB293_1 +; PPC64LE-NEXT: .LBB293_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i32* %ptr, i32 %val acq_rel + ret i32 %ret +} + +define i32 @test294(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test294: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB294_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmpw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB294_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB294_1 +; PPC64LE-NEXT: .LBB294_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i32* %ptr, i32 %val seq_cst + ret i32 %ret +} + +define i64 @test295(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test295: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB295_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpd 4, 5 +; PPC64LE-NEXT: bge 0, .LBB295_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB295_1 +; PPC64LE-NEXT: .LBB295_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw min i64* %ptr, i64 %val monotonic + ret i64 %ret +} + +define i64 @test296(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test296: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB296_1: +; PPC64LE-NEXT: ldarx 3, 0, 5 +; PPC64LE-NEXT: cmpd 4, 3 +; PPC64LE-NEXT: bge 0, .LBB296_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB296_1 +; PPC64LE-NEXT: .LBB296_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i64* %ptr, i64 %val acquire + ret i64 %ret +} + +define i64 @test297(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test297: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB297_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpd 4, 5 +; PPC64LE-NEXT: bge 0, .LBB297_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB297_1 +; PPC64LE-NEXT: .LBB297_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw min i64* %ptr, i64 %val release + ret i64 %ret +} + +define i64 @test298(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test298: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB298_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpd 4, 5 +; PPC64LE-NEXT: bge 0, .LBB298_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB298_1 +; PPC64LE-NEXT: .LBB298_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i64* %ptr, i64 %val acq_rel + ret i64 %ret +} + +define i64 @test299(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test299: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB299_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpd 4, 5 +; PPC64LE-NEXT: bge 0, .LBB299_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB299_1 +; PPC64LE-NEXT: .LBB299_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw min i64* %ptr, i64 %val seq_cst + ret i64 %ret +} + +define i8 @test300(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test300: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB300_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB300_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB300_1 +; PPC64LE-NEXT: .LBB300_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i8* %ptr, i8 %val monotonic + ret i8 %ret +} + +define i8 @test301(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test301: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB301_1: +; PPC64LE-NEXT: lbarx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: ble 0, .LBB301_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 
4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB301_1 +; PPC64LE-NEXT: .LBB301_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i8* %ptr, i8 %val acquire + ret i8 %ret +} + +define i8 @test302(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test302: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB302_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB302_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB302_1 +; PPC64LE-NEXT: .LBB302_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i8* %ptr, i8 %val release + ret i8 %ret +} + +define i8 @test303(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test303: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB303_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB303_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB303_1 +; PPC64LE-NEXT: .LBB303_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i8* %ptr, i8 %val acq_rel + ret i8 %ret +} + +define i8 @test304(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test304: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB304_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB304_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB304_1 +; PPC64LE-NEXT: .LBB304_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i8* %ptr, i8 %val seq_cst + ret i8 %ret +} + +define i16 @test305(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test305: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB305_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB305_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB305_1 +; PPC64LE-NEXT: .LBB305_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i16* %ptr, i16 %val monotonic + ret i16 %ret +} + +define i16 @test306(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test306: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB306_1: +; PPC64LE-NEXT: lharx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: ble 0, .LBB306_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB306_1 +; PPC64LE-NEXT: .LBB306_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i16* %ptr, i16 %val acquire + ret i16 %ret +} + +define i16 @test307(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test307: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB307_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB307_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB307_1 +; PPC64LE-NEXT: .LBB307_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i16* %ptr, i16 %val release + ret i16 %ret +} + +define i16 @test308(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test308: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB308_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB308_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB308_1 +; PPC64LE-NEXT: .LBB308_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i16* %ptr, i16 %val acq_rel + ret i16 %ret +} + +define i16 @test309(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test309: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB309_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB309_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB309_1 +; PPC64LE-NEXT: .LBB309_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i16* %ptr, i16 %val seq_cst + ret i16 %ret +} + +define i32 @test310(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test310: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB310_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB310_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB310_1 +; PPC64LE-NEXT: .LBB310_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val monotonic + ret i32 %ret +} + +define i32 @test311(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test311: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB311_1: +; PPC64LE-NEXT: lwarx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: ble 0, .LBB311_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB311_1 +; PPC64LE-NEXT: .LBB311_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val acquire + ret i32 %ret +} + +define i32 @test312(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test312: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB312_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB312_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB312_1 +; PPC64LE-NEXT: .LBB312_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val release + ret i32 %ret +} + +define i32 @test313(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test313: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB313_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB313_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB313_1 +; PPC64LE-NEXT: .LBB313_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val acq_rel + ret i32 %ret +} + +define i32 @test314(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test314: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB314_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB314_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB314_1 +; PPC64LE-NEXT: .LBB314_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val seq_cst + ret i32 %ret +} + +define i64 @test315(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test315: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB315_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: ble 0, .LBB315_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB315_1 +; PPC64LE-NEXT: .LBB315_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val monotonic + ret i64 %ret +} + +define i64 @test316(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test316: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB316_1: +; PPC64LE-NEXT: ldarx 3, 0, 5 +; PPC64LE-NEXT: cmpld 4, 3 +; PPC64LE-NEXT: ble 0, .LBB316_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB316_1 +; PPC64LE-NEXT: .LBB316_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val acquire + ret i64 %ret +} + +define i64 @test317(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test317: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB317_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: ble 0, .LBB317_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB317_1 +; PPC64LE-NEXT: .LBB317_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val release + ret i64 %ret +} + +define i64 @test318(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test318: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB318_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: ble 0, .LBB318_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB318_1 +; PPC64LE-NEXT: .LBB318_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val acq_rel + ret i64 %ret +} + +define i64 @test319(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test319: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB319_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: ble 0, .LBB319_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB319_1 +; PPC64LE-NEXT: .LBB319_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val seq_cst + ret i64 %ret +} + +define i8 @test320(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test320: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB320_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB320_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB320_1 +; PPC64LE-NEXT: .LBB320_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val monotonic + ret i8 %ret +} + +define i8 @test321(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test321: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB321_1: +; PPC64LE-NEXT: lbarx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: bge 0, .LBB321_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB321_1 +; PPC64LE-NEXT: .LBB321_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val acquire + ret i8 %ret +} + +define i8 @test322(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test322: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB322_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB322_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB322_1 +; PPC64LE-NEXT: .LBB322_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val release + ret i8 %ret +} + +define i8 @test323(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test323: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB323_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB323_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB323_1 +; PPC64LE-NEXT: .LBB323_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val acq_rel + ret i8 %ret +} + +define i8 @test324(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test324: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB324_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB324_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB324_1 +; PPC64LE-NEXT: .LBB324_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val seq_cst + ret i8 %ret +} + +define i16 @test325(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test325: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB325_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB325_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB325_1 +; PPC64LE-NEXT: .LBB325_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val monotonic + ret i16 %ret +} + +define i16 @test326(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test326: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB326_1: +; PPC64LE-NEXT: lharx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: bge 0, .LBB326_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB326_1 +; PPC64LE-NEXT: .LBB326_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val acquire + ret i16 %ret +} + +define i16 @test327(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test327: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB327_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB327_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB327_1 +; PPC64LE-NEXT: .LBB327_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val release + ret i16 %ret +} + +define i16 @test328(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test328: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB328_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB328_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB328_1 +; PPC64LE-NEXT: .LBB328_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val acq_rel + ret i16 %ret +} + +define i16 @test329(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test329: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB329_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB329_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB329_1 +; PPC64LE-NEXT: .LBB329_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val seq_cst + ret i16 %ret +} + +define i32 @test330(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test330: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB330_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB330_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB330_1 +; PPC64LE-NEXT: .LBB330_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val monotonic + ret i32 %ret +} + +define i32 @test331(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test331: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB331_1: +; PPC64LE-NEXT: lwarx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: bge 0, .LBB331_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB331_1 +; PPC64LE-NEXT: .LBB331_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val acquire + ret i32 %ret +} + +define i32 @test332(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test332: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB332_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB332_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB332_1 +; PPC64LE-NEXT: .LBB332_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val release + ret i32 %ret +} + +define i32 @test333(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test333: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB333_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB333_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB333_1 +; PPC64LE-NEXT: .LBB333_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val acq_rel + ret i32 %ret +} + +define i32 @test334(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test334: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB334_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB334_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB334_1 +; PPC64LE-NEXT: .LBB334_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val seq_cst + ret i32 %ret +} + +define i64 @test335(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test335: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB335_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: bge 0, .LBB335_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB335_1 +; PPC64LE-NEXT: .LBB335_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i64* %ptr, i64 %val monotonic + ret i64 %ret +} + +define i64 @test336(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test336: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB336_1: +; PPC64LE-NEXT: ldarx 3, 0, 5 +; PPC64LE-NEXT: cmpld 4, 3 +; PPC64LE-NEXT: bge 0, .LBB336_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 
4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB336_1 +; PPC64LE-NEXT: .LBB336_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i64* %ptr, i64 %val acquire + ret i64 %ret +} + +define i64 @test337(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test337: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB337_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: bge 0, .LBB337_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB337_1 +; PPC64LE-NEXT: .LBB337_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i64* %ptr, i64 %val release + ret i64 %ret +} + +define i64 @test338(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test338: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB338_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: bge 0, .LBB338_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB338_1 +; PPC64LE-NEXT: .LBB338_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i64* %ptr, i64 %val acq_rel + ret i64 %ret +} + +define i64 @test339(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test339: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB339_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: bge 0, .LBB339_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB339_1 +; PPC64LE-NEXT: .LBB339_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i64* %ptr, i64 %val seq_cst + ret i64 %ret +} + +define i8 @test340(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test340: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB340_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB340_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread monotonic + ret i8 %ret +} + +define i8 @test341(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test341: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB341_1: +; PPC64LE-NEXT: lbarx 3, 0, 5 +; PPC64LE-NEXT: stbcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB341_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acquire + ret i8 %ret +} + +define i8 @test342(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test342: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB342_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB342_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread release + ret i8 %ret +} + +define i8 @test343(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test343: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB343_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB343_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acq_rel + ret i8 %ret +} + +define i8 @test344(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test344: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB344_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: stbcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB344_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread seq_cst + ret i8 %ret +} + +define i16 @test345(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test345: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB345_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB345_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread monotonic + ret i16 %ret +} + +define i16 @test346(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test346: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB346_1: +; PPC64LE-NEXT: lharx 3, 0, 5 +; PPC64LE-NEXT: sthcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB346_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acquire + ret i16 %ret +} + +define i16 @test347(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test347: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB347_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB347_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread release + ret i16 %ret +} + +define i16 @test348(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test348: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB348_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB348_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acq_rel + ret i16 %ret +} + +define i16 @test349(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test349: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB349_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB349_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread seq_cst + ret i16 %ret +} + +define i32 @test350(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test350: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB350_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB350_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread monotonic + ret i32 %ret +} + +define i32 @test351(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test351: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB351_1: +; PPC64LE-NEXT: lwarx 3, 0, 5 +; PPC64LE-NEXT: stwcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB351_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acquire + ret i32 %ret +} + +define i32 @test352(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test352: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB352_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: stwcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB352_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread release + ret i32 %ret +} + +define i32 @test353(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test353: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB353_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB353_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acq_rel + ret i32 %ret +} + +define i32 @test354(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test354: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB354_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB354_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread seq_cst + ret i32 %ret +} + +define i64 @test355(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test355: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB355_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB355_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread monotonic + ret i64 %ret +} + +define i64 @test356(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test356: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB356_1: +; PPC64LE-NEXT: ldarx 3, 0, 5 +; PPC64LE-NEXT: stdcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB356_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acquire + ret i64 %ret +} + +define i64 @test357(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test357: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB357_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB357_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread release + ret i64 %ret +} + +define i64 @test358(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test358: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB358_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB358_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acq_rel + ret i64 %ret +} + +define i64 @test359(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test359: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB359_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB359_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread seq_cst + ret i64 %ret +} + +define i8 @test360(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test360: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB360_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stbcx. 
6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB360_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw add i8* %ptr, i8 %val singlethread monotonic + ret i8 %ret +} + +define i8 @test361(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test361: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB361_1: +; PPC64LE-NEXT: lbarx 3, 0, 5 +; PPC64LE-NEXT: add 6, 4, 3 +; PPC64LE-NEXT: stbcx. 6, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB361_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i8* %ptr, i8 %val singlethread acquire + ret i8 %ret +} + +define i8 @test362(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test362: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB362_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB362_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw add i8* %ptr, i8 %val singlethread release + ret i8 %ret +} + +define i8 @test363(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test363: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB363_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB363_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i8* %ptr, i8 %val singlethread acq_rel + ret i8 %ret +} + +define i8 @test364(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test364: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB364_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB364_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i8* %ptr, i8 %val singlethread seq_cst + ret i8 %ret +} + +define i16 @test365(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test365: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB365_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB365_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw add i16* %ptr, i16 %val singlethread monotonic + ret i16 %ret +} + +define i16 @test366(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test366: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB366_1: +; PPC64LE-NEXT: lharx 3, 0, 5 +; PPC64LE-NEXT: add 6, 4, 3 +; PPC64LE-NEXT: sthcx. 6, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB366_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i16* %ptr, i16 %val singlethread acquire + ret i16 %ret +} + +define i16 @test367(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test367: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB367_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB367_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw add i16* %ptr, i16 %val singlethread release + ret i16 %ret +} + +define i16 @test368(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test368: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB368_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: sthcx. 
6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB368_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i16* %ptr, i16 %val singlethread acq_rel + ret i16 %ret +} + +define i16 @test369(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test369: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB369_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB369_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i16* %ptr, i16 %val singlethread seq_cst + ret i16 %ret +} + +define i32 @test370(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test370: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB370_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB370_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw add i32* %ptr, i32 %val singlethread monotonic + ret i32 %ret +} + +define i32 @test371(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test371: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB371_1: +; PPC64LE-NEXT: lwarx 3, 0, 5 +; PPC64LE-NEXT: add 6, 4, 3 +; PPC64LE-NEXT: stwcx. 6, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB371_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i32* %ptr, i32 %val singlethread acquire + ret i32 %ret +} + +define i32 @test372(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test372: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB372_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB372_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw add i32* %ptr, i32 %val singlethread release + ret i32 %ret +} + +define i32 @test373(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test373: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB373_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB373_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i32* %ptr, i32 %val singlethread acq_rel + ret i32 %ret +} + +define i32 @test374(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test374: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB374_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB374_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i32* %ptr, i32 %val singlethread seq_cst + ret i32 %ret +} + +define i64 @test375(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test375: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB375_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB375_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw add i64* %ptr, i64 %val singlethread monotonic + ret i64 %ret +} + +define i64 @test376(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test376: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB376_1: +; PPC64LE-NEXT: ldarx 3, 0, 5 +; PPC64LE-NEXT: add 6, 4, 3 +; PPC64LE-NEXT: stdcx. 
6, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB376_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i64* %ptr, i64 %val singlethread acquire + ret i64 %ret +} + +define i64 @test377(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test377: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB377_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB377_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw add i64* %ptr, i64 %val singlethread release + ret i64 %ret +} + +define i64 @test378(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test378: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB378_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB378_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i64* %ptr, i64 %val singlethread acq_rel + ret i64 %ret +} + +define i64 @test379(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test379: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB379_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: add 6, 4, 5 +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB379_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw add i64* %ptr, i64 %val singlethread seq_cst + ret i64 %ret +} + +define i8 @test380(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test380: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB380_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB380_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i8* %ptr, i8 %val singlethread monotonic + ret i8 %ret +} + +define i8 @test381(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test381: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB381_1: +; PPC64LE-NEXT: lbarx 3, 0, 5 +; PPC64LE-NEXT: subf 6, 4, 3 +; PPC64LE-NEXT: stbcx. 6, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB381_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acquire + ret i8 %ret +} + +define i8 @test382(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test382: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB382_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB382_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i8* %ptr, i8 %val singlethread release + ret i8 %ret +} + +define i8 @test383(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test383: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB383_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: stbcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB383_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acq_rel + ret i8 %ret +} + +define i8 @test384(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test384: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB384_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: stbcx. 
6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB384_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i8* %ptr, i8 %val singlethread seq_cst + ret i8 %ret +} + +define i16 @test385(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test385: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB385_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB385_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i16* %ptr, i16 %val singlethread monotonic + ret i16 %ret +} + +define i16 @test386(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test386: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB386_1: +; PPC64LE-NEXT: lharx 3, 0, 5 +; PPC64LE-NEXT: subf 6, 4, 3 +; PPC64LE-NEXT: sthcx. 6, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB386_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acquire + ret i16 %ret +} + +define i16 @test387(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test387: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB387_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB387_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i16* %ptr, i16 %val singlethread release + ret i16 %ret +} + +define i16 @test388(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test388: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB388_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB388_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acq_rel + ret i16 %ret +} + +define i16 @test389(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test389: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB389_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: sthcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB389_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i16* %ptr, i16 %val singlethread seq_cst + ret i16 %ret +} + +define i32 @test390(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test390: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB390_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB390_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i32* %ptr, i32 %val singlethread monotonic + ret i32 %ret +} + +define i32 @test391(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test391: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB391_1: +; PPC64LE-NEXT: lwarx 3, 0, 5 +; PPC64LE-NEXT: subf 6, 4, 3 +; PPC64LE-NEXT: stwcx. 6, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB391_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acquire + ret i32 %ret +} + +define i32 @test392(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test392: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB392_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: stwcx. 
6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB392_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i32* %ptr, i32 %val singlethread release + ret i32 %ret +} + +define i32 @test393(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test393: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB393_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB393_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acq_rel + ret i32 %ret +} + +define i32 @test394(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test394: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB394_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: subf 6, 4, 5 +; PPC64LE-NEXT: stwcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB394_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i32* %ptr, i32 %val singlethread seq_cst + ret i32 %ret +} + +define i64 @test395(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test395: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB395_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: sub 6, 5, 4 +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB395_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i64* %ptr, i64 %val singlethread monotonic + ret i64 %ret +} + +define i64 @test396(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test396: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB396_1: +; PPC64LE-NEXT: ldarx 3, 0, 5 +; PPC64LE-NEXT: sub 6, 3, 4 +; PPC64LE-NEXT: stdcx. 6, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB396_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acquire + ret i64 %ret +} + +define i64 @test397(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test397: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB397_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: sub 6, 5, 4 +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB397_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i64* %ptr, i64 %val singlethread release + ret i64 %ret +} + +define i64 @test398(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test398: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB398_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: sub 6, 5, 4 +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB398_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acq_rel + ret i64 %ret +} + +define i64 @test399(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test399: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB399_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: sub 6, 5, 4 +; PPC64LE-NEXT: stdcx. 6, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB399_1 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw sub i64* %ptr, i64 %val singlethread seq_cst + ret i64 %ret +} + +define i8 @test400(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test400: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB400_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: and 6, 4, 5 +; PPC64LE-NEXT: stbcx. 
+define i8 @test400(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test400:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB400_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB400_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test401(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test401:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB401_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB401_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test402(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test402:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB402_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB402_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test403(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test403:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB403_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB403_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test404(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test404:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB404_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB404_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test405(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test405:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB405_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB405_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test406(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test406:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB406_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB406_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test407(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test407:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB407_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB407_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test408(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test408:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB408_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB408_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test409(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test409:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB409_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB409_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test410(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test410:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB410_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB410_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test411(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test411:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB411_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB411_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test412(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test412:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB412_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB412_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test413(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test413:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB413_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB413_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test414(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test414:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB414_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB414_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test415(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test415:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB415_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB415_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test416(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test416:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB416_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB416_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test417(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test417:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB417_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB417_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test418(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test418:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB418_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB418_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test419(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test419:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB419_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB419_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
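+; test420-test439: the same matrix for atomicrmw nand, which maps directly
+; onto the nand instruction as the update step of the loop sketched above;
+; fence placement per ordering is unchanged.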
+define i8 @test420(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test420:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB420_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB420_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test421(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test421:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB421_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB421_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test422(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test422:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB422_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB422_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test423(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test423:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB423_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB423_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test424(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test424:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB424_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB424_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test425(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test425:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB425_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB425_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test426(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test426:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB426_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB426_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test427(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test427:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB427_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB427_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test428(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test428:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB428_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB428_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test429(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test429:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB429_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB429_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test430(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test430:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB430_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB430_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test431(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test431:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB431_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB431_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test432(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test432:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB432_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB432_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test433(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test433:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB433_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB433_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test434(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test434:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB434_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB434_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test435(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test435:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB435_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB435_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test436(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test436:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB436_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB436_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test437(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test437:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB437_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB437_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test438(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test438:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB438_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB438_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test439(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test439:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB439_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB439_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
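+; test440-test459: atomicrmw or, again one load-reserve/store-conditional
+; loop per width with the or instruction as the update step and the usual
+; fences per ordering.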
+define i8 @test440(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test440:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB440_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB440_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test441(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test441:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB441_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB441_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test442(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test442:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB442_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB442_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test443(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test443:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB443_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB443_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test444(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test444:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB444_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB444_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test445(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test445:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB445_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB445_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test446(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test446:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB446_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB446_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test447(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test447:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB447_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB447_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test448(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test448:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB448_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB448_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test449(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test449:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB449_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB449_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test450(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test450:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB450_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB450_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test451(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test451:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB451_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB451_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test452(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test452:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB452_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB452_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test453(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test453:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB453_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB453_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test454(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test454:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB454_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB454_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test455(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test455:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB455_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB455_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test456(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test456:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB456_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB456_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test457(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test457:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB457_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB457_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test458(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test458:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB458_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB458_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test459(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test459:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB459_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB459_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
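+; test460-test479: atomicrmw xor, the same shape with xor as the update
+; step of the loop.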
+define i8 @test460(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test460:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB460_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB460_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test461(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test461:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB461_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB461_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test462(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test462:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB462_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB462_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test463(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test463:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB463_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB463_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test464(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test464:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB464_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB464_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test465(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test465:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB465_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB465_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test466(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test466:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB466_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB466_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test467(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test467:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB467_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB467_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test468(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test468:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB468_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB468_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test469(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test469:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB469_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB469_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test470(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test470:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB470_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB470_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test471(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test471:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB471_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB471_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test472(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test472:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB472_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB472_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test473(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test473:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB473_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB473_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test474(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test474:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB474_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB474_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test475(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test475:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB475_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB475_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test476(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test476:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB476_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB476_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test477(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test477:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB477_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB477_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test478(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test478:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB478_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB478_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test479(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test479:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB479_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB479_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
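+; test480-test499: atomicrmw max switches from a straight-line update to a
+; compare-and-branch inside the loop. Sub-word widths sign-extend the
+; loaded value (extsb/extsh) before a signed compare, and when no update
+; is needed the code branches past the store-conditional, as in the i8
+; monotonic case below:
+;   loop: lbarx 5, 0, 3
+;         extsb 6, 5          sign-extend the loaded i8
+;         cmpw 4, 6           signed compare against the new value
+;         ble 0, done         not greater: keep the old value, skip store
+;         stbcx. 4, 0, 3
+;         bne 0, loop
+;   done: ...                 old value returned; trailing fence if any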
+define i8 @test480(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test480:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB480_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB480_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB480_1
+; PPC64LE-NEXT: .LBB480_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test481(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test481:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB481_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB481_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB481_1
+; PPC64LE-NEXT: .LBB481_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test482(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test482:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB482_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB482_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB482_1
+; PPC64LE-NEXT: .LBB482_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test483(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test483:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB483_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB483_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB483_1
+; PPC64LE-NEXT: .LBB483_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test484(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test484:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB484_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB484_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB484_1
+; PPC64LE-NEXT: .LBB484_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test485(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test485:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB485_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB485_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB485_1
+; PPC64LE-NEXT: .LBB485_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test486(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test486:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB486_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB486_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB486_1
+; PPC64LE-NEXT: .LBB486_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test487(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test487:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB487_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB487_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB487_1
+; PPC64LE-NEXT: .LBB487_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test488(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test488:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB488_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB488_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB488_1
+; PPC64LE-NEXT: .LBB488_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test489(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test489:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB489_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB489_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB489_1
+; PPC64LE-NEXT: .LBB489_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test490(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test490:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB490_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB490_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB490_1
+; PPC64LE-NEXT: .LBB490_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test491(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test491:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB491_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB491_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB491_1
+; PPC64LE-NEXT: .LBB491_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test492(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test492:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB492_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB492_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB492_1
+; PPC64LE-NEXT: .LBB492_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test493(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test493:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB493_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB493_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB493_1
+; PPC64LE-NEXT: .LBB493_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test494(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test494:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB494_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB494_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB494_1
+; PPC64LE-NEXT: .LBB494_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test495(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test495:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB495_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB495_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB495_1
+; PPC64LE-NEXT: .LBB495_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test496(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test496:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB496_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: ble 0, .LBB496_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB496_1
+; PPC64LE-NEXT: .LBB496_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test497(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test497:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB497_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB497_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB497_1
+; PPC64LE-NEXT: .LBB497_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test498(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test498:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB498_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB498_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB498_1
+; PPC64LE-NEXT: .LBB498_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test499(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test499:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB499_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB499_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB499_1
+; PPC64LE-NEXT: .LBB499_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
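+; test500-test519: atomicrmw min is identical to max with the branch sense
+; inverted; bge rather than ble skips the store-conditional.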
+define i8 @test500(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test500:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB500_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB500_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB500_1
+; PPC64LE-NEXT: .LBB500_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test501(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test501:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB501_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB501_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB501_1
+; PPC64LE-NEXT: .LBB501_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test502(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test502:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB502_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB502_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB502_1
+; PPC64LE-NEXT: .LBB502_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test503(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test503:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB503_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB503_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB503_1
+; PPC64LE-NEXT: .LBB503_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test504(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test504:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB504_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB504_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB504_1
+; PPC64LE-NEXT: .LBB504_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test505(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test505:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB505_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB505_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB505_1
+; PPC64LE-NEXT: .LBB505_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test506(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test506:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB506_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB506_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB506_1
+; PPC64LE-NEXT: .LBB506_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test507(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test507:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB507_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB507_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB507_1
+; PPC64LE-NEXT: .LBB507_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test508(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test508:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB508_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB508_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB508_1
+; PPC64LE-NEXT: .LBB508_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test509(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test509:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB509_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB509_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB509_1
+; PPC64LE-NEXT: .LBB509_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test510(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test510:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB510_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB510_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB510_1
+; PPC64LE-NEXT: .LBB510_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test511(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test511:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB511_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB511_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB511_1
+; PPC64LE-NEXT: .LBB511_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test512(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test512:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB512_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB512_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB512_1
+; PPC64LE-NEXT: .LBB512_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test513(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test513:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB513_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB513_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB513_1
+; PPC64LE-NEXT: .LBB513_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test514(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test514:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB514_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB514_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB514_1
+; PPC64LE-NEXT: .LBB514_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test515(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test515:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB515_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB515_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB515_1
+; PPC64LE-NEXT: .LBB515_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test516(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test516:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB516_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: bge 0, .LBB516_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB516_1
+; PPC64LE-NEXT: .LBB516_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test517(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test517:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB517_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB517_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB517_1
+; PPC64LE-NEXT: .LBB517_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test518(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test518:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB518_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB518_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB518_1
+; PPC64LE-NEXT: .LBB518_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test519(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test519:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB519_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB519_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB519_1
+; PPC64LE-NEXT: .LBB519_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
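+; test520-test539: atomicrmw umax uses the unsigned compares cmplw/cmpld,
+; so the sub-word cases need no extsb/extsh before the comparison; the
+; loop and fence structure otherwise matches the signed max tests above.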
+define i8 @test520(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test520:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB520_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB520_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB520_1
+; PPC64LE-NEXT: .LBB520_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test521(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test521:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB521_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB521_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB521_1
+; PPC64LE-NEXT: .LBB521_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test522(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test522:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB522_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB522_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB522_1
+; PPC64LE-NEXT: .LBB522_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test523(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test523:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB523_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB523_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB523_1
+; PPC64LE-NEXT: .LBB523_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test524(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test524:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB524_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB524_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB524_1
+; PPC64LE-NEXT: .LBB524_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test525(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test525:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB525_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB525_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB525_1
+; PPC64LE-NEXT: .LBB525_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test526(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test526:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB526_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB526_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB526_1
+; PPC64LE-NEXT: .LBB526_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test527(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test527:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB527_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB527_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB527_1 +; PPC64LE-NEXT: .LBB527_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i16* %ptr, i16 %val singlethread release + ret i16 %ret +} + +define i16 @test528(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test528: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB528_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB528_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB528_1 +; PPC64LE-NEXT: .LBB528_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acq_rel + ret i16 %ret +} + +define i16 @test529(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test529: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB529_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB529_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB529_1 +; PPC64LE-NEXT: .LBB529_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i16* %ptr, i16 %val singlethread seq_cst + ret i16 %ret +} + +define i32 @test530(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test530: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB530_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB530_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB530_1 +; PPC64LE-NEXT: .LBB530_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val singlethread monotonic + ret i32 %ret +} + +define i32 @test531(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test531: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB531_1: +; PPC64LE-NEXT: lwarx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: ble 0, .LBB531_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB531_1 +; PPC64LE-NEXT: .LBB531_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acquire + ret i32 %ret +} + +define i32 @test532(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test532: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB532_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB532_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB532_1 +; PPC64LE-NEXT: .LBB532_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val singlethread release + ret i32 %ret +} + +define i32 @test533(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test533: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB533_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB533_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB533_1 +; PPC64LE-NEXT: .LBB533_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acq_rel + ret i32 %ret +} + +define i32 @test534(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test534: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB534_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: ble 0, .LBB534_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB534_1 +; PPC64LE-NEXT: .LBB534_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i32* %ptr, i32 %val singlethread seq_cst + ret i32 %ret +} + +define i64 @test535(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test535: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB535_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: ble 0, .LBB535_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB535_1 +; PPC64LE-NEXT: .LBB535_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val singlethread monotonic + ret i64 %ret +} + +define i64 @test536(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test536: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB536_1: +; PPC64LE-NEXT: ldarx 3, 0, 5 +; PPC64LE-NEXT: cmpld 4, 3 +; PPC64LE-NEXT: ble 0, .LBB536_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB536_1 +; PPC64LE-NEXT: .LBB536_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acquire + ret i64 %ret +} + +define i64 @test537(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test537: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB537_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: ble 0, .LBB537_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB537_1 +; PPC64LE-NEXT: .LBB537_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val singlethread release + ret i64 %ret +} + +define i64 @test538(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test538: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB538_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: ble 0, .LBB538_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB538_1 +; PPC64LE-NEXT: .LBB538_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acq_rel + ret i64 %ret +} + +define i64 @test539(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test539: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB539_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: ble 0, .LBB539_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB539_1 +; PPC64LE-NEXT: .LBB539_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umax i64* %ptr, i64 %val singlethread seq_cst + ret i64 %ret +} + +define i8 @test540(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test540: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB540_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB540_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB540_1 +; PPC64LE-NEXT: .LBB540_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val singlethread monotonic + ret i8 %ret +} + +define i8 @test541(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test541: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB541_1: +; PPC64LE-NEXT: lbarx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: bge 0, .LBB541_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 
4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB541_1 +; PPC64LE-NEXT: .LBB541_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acquire + ret i8 %ret +} + +define i8 @test542(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test542: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB542_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB542_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB542_1 +; PPC64LE-NEXT: .LBB542_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val singlethread release + ret i8 %ret +} + +define i8 @test543(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test543: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB543_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB543_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB543_1 +; PPC64LE-NEXT: .LBB543_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acq_rel + ret i8 %ret +} + +define i8 @test544(i8* %ptr, i8 %val) { +; PPC64LE-LABEL: test544: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB544_1: +; PPC64LE-NEXT: lbarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB544_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stbcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB544_1 +; PPC64LE-NEXT: .LBB544_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i8* %ptr, i8 %val singlethread seq_cst + ret i8 %ret +} + +define i16 @test545(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test545: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB545_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB545_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB545_1 +; PPC64LE-NEXT: .LBB545_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val singlethread monotonic + ret i16 %ret +} + +define i16 @test546(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test546: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB546_1: +; PPC64LE-NEXT: lharx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: bge 0, .LBB546_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB546_1 +; PPC64LE-NEXT: .LBB546_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acquire + ret i16 %ret +} + +define i16 @test547(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test547: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB547_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB547_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB547_1 +; PPC64LE-NEXT: .LBB547_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val singlethread release + ret i16 %ret +} + +define i16 @test548(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test548: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB548_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB548_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 
4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB548_1 +; PPC64LE-NEXT: .LBB548_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acq_rel + ret i16 %ret +} + +define i16 @test549(i16* %ptr, i16 %val) { +; PPC64LE-LABEL: test549: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB549_1: +; PPC64LE-NEXT: lharx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB549_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: sthcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB549_1 +; PPC64LE-NEXT: .LBB549_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i16* %ptr, i16 %val singlethread seq_cst + ret i16 %ret +} + +define i32 @test550(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test550: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB550_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB550_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB550_1 +; PPC64LE-NEXT: .LBB550_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val singlethread monotonic + ret i32 %ret +} + +define i32 @test551(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test551: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: mr 5, 3 +; PPC64LE-NEXT: .LBB551_1: +; PPC64LE-NEXT: lwarx 3, 0, 5 +; PPC64LE-NEXT: cmplw 4, 3 +; PPC64LE-NEXT: bge 0, .LBB551_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 5 +; PPC64LE-NEXT: bne 0, .LBB551_1 +; PPC64LE-NEXT: .LBB551_3: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acquire + ret i32 %ret +} + +define i32 @test552(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test552: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB552_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB552_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB552_1 +; PPC64LE-NEXT: .LBB552_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val singlethread release + ret i32 %ret +} + +define i32 @test553(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test553: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: .LBB553_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB553_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB553_1 +; PPC64LE-NEXT: .LBB553_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acq_rel + ret i32 %ret +} + +define i32 @test554(i32* %ptr, i32 %val) { +; PPC64LE-LABEL: test554: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: sync +; PPC64LE-NEXT: .LBB554_1: +; PPC64LE-NEXT: lwarx 5, 0, 3 +; PPC64LE-NEXT: cmplw 4, 5 +; PPC64LE-NEXT: bge 0, .LBB554_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stwcx. 4, 0, 3 +; PPC64LE-NEXT: bne 0, .LBB554_1 +; PPC64LE-NEXT: .LBB554_3: +; PPC64LE-NEXT: mr 3, 5 +; PPC64LE-NEXT: lwsync +; PPC64LE-NEXT: blr + %ret = atomicrmw umin i32* %ptr, i32 %val singlethread seq_cst + ret i32 %ret +} + +define i64 @test555(i64* %ptr, i64 %val) { +; PPC64LE-LABEL: test555: +; PPC64LE: # BB#0: +; PPC64LE-NEXT: .LBB555_1: +; PPC64LE-NEXT: ldarx 5, 0, 3 +; PPC64LE-NEXT: cmpld 4, 5 +; PPC64LE-NEXT: bge 0, .LBB555_3 +; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: stdcx. 
+; PPC64LE-NEXT: bne 0, .LBB555_1
+; PPC64LE-NEXT: .LBB555_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test556(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test556:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB556_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpld 4, 3
+; PPC64LE-NEXT: bge 0, .LBB556_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB556_1
+; PPC64LE-NEXT: .LBB556_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test557(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test557:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB557_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB557_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB557_1
+; PPC64LE-NEXT: .LBB557_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test558(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test558:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB558_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB558_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB558_1
+; PPC64LE-NEXT: .LBB558_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test559(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test559:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB559_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB559_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB559_1
+; PPC64LE-NEXT: .LBB559_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
Index: llvm/utils/update_llc_test_checks.py
===================================================================
--- llvm/utils/update_llc_test_checks.py
+++ llvm/utils/update_llc_test_checks.py
@@ -29,6 +29,8 @@
 SCRUB_WHITESPACE_RE = re.compile(r'(?!^(|  \w))[ \t]+', flags=re.M)
 SCRUB_TRAILING_WHITESPACE_RE = re.compile(r'[ \t]+$', flags=re.M)
 SCRUB_KILL_COMMENT_RE = re.compile(r'^ *#+ +kill:.*\n')
+SCRUB_LOOP_COMMENT_RE = re.compile(
+    r'# =>This Inner Loop Header:.*|# in Loop:.*', flags=re.M)
 
 ASM_FUNCTION_X86_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n[^:]*?'
@@ -114,6 +116,8 @@
   asm = SCRUB_WHITESPACE_RE.sub(r' ', asm)
   # Expand the tabs used for indentation.
   asm = string.expandtabs(asm, 2)
+  # Strip unimportant comments.
+  asm = SCRUB_LOOP_COMMENT_RE.sub(r'', asm)
   # Strip trailing whitespace.
   asm = SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
   return asm
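
Note (illustration only, not part of the patch): a minimal sketch of what the new scrub step does, using the same two patterns as the script; the sample asm text below is hypothetical.

    import re

    # The pattern added above: matches llc's loop-structure annotations.
    SCRUB_LOOP_COMMENT_RE = re.compile(
        r'# =>This Inner Loop Header:.*|# in Loop:.*', flags=re.M)
    SCRUB_TRAILING_WHITESPACE_RE = re.compile(r'[ \t]+$', flags=re.M)

    asm = ('.LBB513_1:              # =>This Inner Loop Header: Depth=1\n'
           '  lwarx 5, 0, 3\n'
           '  bne 0, .LBB513_1      # in Loop: Header=BB513_1 Depth=1\n')

    # Drop the loop annotations, then the trailing blanks they leave behind.
    asm = SCRUB_LOOP_COMMENT_RE.sub('', asm)
    asm = SCRUB_TRAILING_WHITESPACE_RE.sub('', asm)
    assert asm == '.LBB513_1:\n  lwarx 5, 0, 3\n  bne 0, .LBB513_1\n'

Without this step, the generated CHECK lines would carry the "Depth=" annotations, which churn whenever block layout changes and would make autogenerated tests like the one above needlessly unstable.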