diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -357,6 +357,11 @@
   // condition branches.
   setJumpIsExpensive(true);
 
+  // Force atomics to be expanded if the ISA doesn't support them: PR44219
+  setMinCmpXchgSizeInBits(32);
+  setMaxAtomicSizeInBitsSupported(64);
+  setSupportsUnalignedAtomics(false);
+
   // Wide divides are _very_ slow. Try to reduce the width of the divide if
   // possible.
   addBypassSlowDiv(64, 32);
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -270,6 +270,7 @@
   addPass(createNVPTXImageOptimizerPass());
   addPass(createNVPTXAssignValidGlobalNamesPass());
   addPass(createGenericToNVVMPass());
+  addPass(createAtomicExpandPass());
 
   // NVPTXLowerArgs is required for correctness and should be run right
   // before the address space inference passes.
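These three hooks are what drive AtomicExpandPass once the NVPTXTargetMachine change above schedules it: atomics wider than 64 bits become __atomic_* libcalls, i8/i16 cmpxchg is rewritten over the containing aligned 32-bit word, and under-aligned atomics also go to libcalls. A rough sketch of the decisions the pass derives from these hooks — the three getters are the real TargetLoweringBase accessors, but the surrounding logic is simplified for illustration and is not the upstream pass code:

#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// Illustrative only: roughly how AtomicExpandPass consults the hooks above.
static bool expandsToLibcall(unsigned SizeInBits, unsigned AlignInBytes,
                             const TargetLoweringBase &TLI) {
  // Wider than setMaxAtomicSizeInBitsSupported(64) -> __atomic_* libcall.
  if (SizeInBits > TLI.getMaxAtomicSizeInBitsSupported())
    return true;
  // Under-aligned, and setSupportsUnalignedAtomics(false) -> libcall too.
  if (AlignInBytes * 8 < SizeInBits && !TLI.supportsUnalignedAtomics())
    return true;
  return false;
}

static bool widensToPartword(unsigned SizeInBits,
                             const TargetLoweringBase &TLI) {
  // Narrower than setMinCmpXchgSizeInBits(32) -> part-word expansion over
  // the containing i32, as the new tests below check.
  return SizeInBits < TLI.getMinCmpXchgSizeInBits();
}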
diff --git a/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-i16.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-i16.ll
@@ -0,0 +1,183 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=nvptx-unknown-unknown -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=nvptx64-unknown-unknown -S -atomic-expand %s | FileCheck %s
+
+define i16 @test_atomicrmw_xchg_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_xchg_i16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw xchg i16 addrspace(1)* [[PTR:%.*]], i16 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i16 [[RES]]
+;
+  %res = atomicrmw xchg i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_add_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw add i16 addrspace(1)* [[PTR:%.*]], i16 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i16 [[RES]]
+;
+  %res = atomicrmw add i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_i16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw sub i16 addrspace(1)* [[PTR:%.*]], i16 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i16 [[RES]]
+;
+  %res = atomicrmw sub i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_and_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global(
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i16 addrspace(1)* [[PTR:%.*]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -4
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP2]] to i32 addrspace(1)*
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[INV_MASK]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT: [[TMP5:%.*]] = atomicrmw and i32 addrspace(1)* [[ALIGNEDADDR]], i32 [[ANDOPERAND]] seq_cst
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i32 [[TMP5]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16
+; CHECK-NEXT: ret i16 [[TMP7]]
+;
+  %res = atomicrmw and i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_nand_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_nand_i16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw nand i16 addrspace(1)* [[PTR:%.*]], i16 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i16 [[RES]]
+;
+  %res = atomicrmw nand i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_or_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_or_i16_global(
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i16 addrspace(1)* [[PTR:%.*]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -4
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP2]] to i32 addrspace(1)*
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP5:%.*]] = atomicrmw or i32 addrspace(1)* [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i32 [[TMP5]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16
+; CHECK-NEXT: ret i16 [[TMP7]]
+;
+  %res = atomicrmw or i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_xor_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_xor_i16_global(
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i16 addrspace(1)* [[PTR:%.*]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -4
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP2]] to i32 addrspace(1)*
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP5:%.*]] = atomicrmw xor i32 addrspace(1)* [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i32 [[TMP5]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16
+; CHECK-NEXT: ret i16 [[TMP7]]
+;
+  %res = atomicrmw xor i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_max_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_max_i16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw max i16 addrspace(1)* [[PTR:%.*]], i16 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i16 [[RES]]
+;
+  %res = atomicrmw max i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_min_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_min_i16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw min i16 addrspace(1)* [[PTR:%.*]], i16 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i16 [[RES]]
+;
+  %res = atomicrmw min i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_umax_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_umax_i16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw umax i16 addrspace(1)* [[PTR:%.*]], i16 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i16 [[RES]]
+;
+  %res = atomicrmw umax i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_umin_i16_global(i16 addrspace(1)* %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_umin_i16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw umin i16 addrspace(1)* [[PTR:%.*]], i16 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i16 [[RES]]
+;
+  %res = atomicrmw umin i16 addrspace(1)* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_cmpxchg_i16_global(i16 addrspace(1)* %out, i16 %in, i16 %old) {
+; CHECK-LABEL: @test_cmpxchg_i16_global(
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, i16 addrspace(1)* [[OUT:%.*]], i64 4
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i16 addrspace(1)* [[GEP]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -4
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP2]] to i32 addrspace(1)*
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[IN:%.*]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP6:%.*]] = zext i16 [[OLD:%.*]] to i32
+; CHECK-NEXT: [[TMP7:%.*]] = shl i32 [[TMP6]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32 addrspace(1)* [[ALIGNEDADDR]]
+; CHECK-NEXT: [[TMP9:%.*]] = and i32 [[TMP8]], [[INV_MASK]]
+; CHECK-NEXT: br label [[PARTWORD_CMPXCHG_LOOP:%.*]]
+; CHECK: partword.cmpxchg.loop:
+; CHECK-NEXT: [[TMP10:%.*]] = phi i32 [ [[TMP9]], [[TMP0:%.*]] ], [ [[TMP16:%.*]], [[PARTWORD_CMPXCHG_FAILURE:%.*]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = or i32 [[TMP10]], [[TMP5]]
+; CHECK-NEXT: [[TMP12:%.*]] = or i32 [[TMP10]], [[TMP7]]
+; CHECK-NEXT: [[TMP13:%.*]] = cmpxchg i32 addrspace(1)* [[ALIGNEDADDR]], i32 [[TMP12]], i32 [[TMP11]] seq_cst seq_cst
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { i32, i1 } [[TMP13]], 0
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { i32, i1 } [[TMP13]], 1
+; CHECK-NEXT: br i1 [[TMP15]], label [[PARTWORD_CMPXCHG_END:%.*]], label [[PARTWORD_CMPXCHG_FAILURE]]
+; CHECK: partword.cmpxchg.failure:
+; CHECK-NEXT: [[TMP16]] = and i32 [[TMP14]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP10]], [[TMP16]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[PARTWORD_CMPXCHG_LOOP]], label [[PARTWORD_CMPXCHG_END]]
+; CHECK: partword.cmpxchg.end:
+; CHECK-NEXT: [[TMP18:%.*]] = lshr i32 [[TMP14]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
+; CHECK-NEXT: [[TMP20:%.*]] = insertvalue { i16, i1 } undef, i16 [[TMP19]], 0
+; CHECK-NEXT: [[TMP21:%.*]] = insertvalue { i16, i1 } [[TMP20]], i1 [[TMP15]], 1
+; CHECK-NEXT: [[EXTRACT:%.*]] = extractvalue { i16, i1 } [[TMP21]], 0
+; CHECK-NEXT: ret i16 [[EXTRACT]]
+;
+  %gep = getelementptr i16, i16 addrspace(1)* %out, i64 4
+  %res = cmpxchg i16 addrspace(1)* %gep, i16 %old, i16 %in seq_cst seq_cst
+  %extract = extractvalue {i16, i1} %res, 0
+  ret i16 %extract
+}
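The and/or/xor cases above do not lose atomicity: the pass performs the operation on the aligned 32-bit word containing the i16 and widens the operand so neighbouring bytes are unaffected. Restating the pointer arithmetic from the checks as a small C++ sketch (identifiers are mine, not the pass's; NVPTX is little-endian, so the byte offset within the word maps directly to a left shift):

#include <cstdint>

// Illustrative restatement of the address arithmetic in the checks above.
struct PartwordGeom {
  uint64_t AlignedAddr; // start of the containing 32-bit word
  uint32_t ShiftAmt;    // bit position of the i16 inside that word
  uint32_t Mask;        // covers the i16's bits within the word
};

static PartwordGeom computeGeometry(uint64_t Addr) {
  PartwordGeom G;
  G.AlignedAddr = Addr & ~uint64_t(3);    // "and i64 %1, -4"
  G.ShiftAmt = uint32_t((Addr & 3) * 8);  // "shl i64 %ptrlsb, 3" plus trunc
  G.Mask = uint32_t(65535) << G.ShiftAmt; // "shl i32 65535, %shiftamt"
  return G;
}
// For "and", the widened operand is (~Mask | (Value << ShiftAmt)): bits
// outside the i16 are and'ed with 1 and therefore stay unchanged.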
diff --git a/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-i8.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-i8.ll
@@ -0,0 +1,183 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=nvptx-unknown-unknown -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=nvptx64-unknown-unknown -S -atomic-expand %s | FileCheck %s
+
+define i8 @test_atomicrmw_xchg_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_xchg_i8_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw xchg i8 addrspace(1)* [[PTR:%.*]], i8 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i8 [[RES]]
+;
+  %res = atomicrmw xchg i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_add_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i8_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw add i8 addrspace(1)* [[PTR:%.*]], i8 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i8 [[RES]]
+;
+  %res = atomicrmw add i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_i8_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw sub i8 addrspace(1)* [[PTR:%.*]], i8 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i8 [[RES]]
+;
+  %res = atomicrmw sub i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_and_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i8_global(
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i8 addrspace(1)* [[PTR:%.*]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -4
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP2]] to i32 addrspace(1)*
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[INV_MASK]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT: [[TMP5:%.*]] = atomicrmw and i32 addrspace(1)* [[ALIGNEDADDR]], i32 [[ANDOPERAND]] seq_cst
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i32 [[TMP5]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i8
+; CHECK-NEXT: ret i8 [[TMP7]]
+;
+  %res = atomicrmw and i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_nand_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_nand_i8_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw nand i8 addrspace(1)* [[PTR:%.*]], i8 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i8 [[RES]]
+;
+  %res = atomicrmw nand i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_or_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_or_i8_global(
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i8 addrspace(1)* [[PTR:%.*]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -4
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP2]] to i32 addrspace(1)*
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP5:%.*]] = atomicrmw or i32 addrspace(1)* [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i32 [[TMP5]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i8
+; CHECK-NEXT: ret i8 [[TMP7]]
+;
+  %res = atomicrmw or i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_xor_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_xor_i8_global(
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i8 addrspace(1)* [[PTR:%.*]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -4
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP2]] to i32 addrspace(1)*
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP5:%.*]] = atomicrmw xor i32 addrspace(1)* [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i32 [[TMP5]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i8
+; CHECK-NEXT: ret i8 [[TMP7]]
+;
+  %res = atomicrmw xor i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_max_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_max_i8_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw max i8 addrspace(1)* [[PTR:%.*]], i8 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i8 [[RES]]
+;
+  %res = atomicrmw max i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_min_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_min_i8_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw min i8 addrspace(1)* [[PTR:%.*]], i8 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i8 [[RES]]
+;
+  %res = atomicrmw min i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_umax_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_umax_i8_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw umax i8 addrspace(1)* [[PTR:%.*]], i8 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i8 [[RES]]
+;
+  %res = atomicrmw umax i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_umin_i8_global(i8 addrspace(1)* %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_umin_i8_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw umin i8 addrspace(1)* [[PTR:%.*]], i8 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i8 [[RES]]
+;
+  %res = atomicrmw umin i8 addrspace(1)* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_cmpxchg_i8_global(i8 addrspace(1)* %out, i8 %in, i8 %old) {
+; CHECK-LABEL: @test_cmpxchg_i8_global(
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, i8 addrspace(1)* [[OUT:%.*]], i64 4
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i8 addrspace(1)* [[GEP]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -4
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP2]] to i32 addrspace(1)*
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[IN:%.*]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[OLD:%.*]] to i32
+; CHECK-NEXT: [[TMP7:%.*]] = shl i32 [[TMP6]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32 addrspace(1)* [[ALIGNEDADDR]]
+; CHECK-NEXT: [[TMP9:%.*]] = and i32 [[TMP8]], [[INV_MASK]]
+; CHECK-NEXT: br label [[PARTWORD_CMPXCHG_LOOP:%.*]]
+; CHECK: partword.cmpxchg.loop:
+; CHECK-NEXT: [[TMP10:%.*]] = phi i32 [ [[TMP9]], [[TMP0:%.*]] ], [ [[TMP16:%.*]], [[PARTWORD_CMPXCHG_FAILURE:%.*]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = or i32 [[TMP10]], [[TMP5]]
+; CHECK-NEXT: [[TMP12:%.*]] = or i32 [[TMP10]], [[TMP7]]
+; CHECK-NEXT: [[TMP13:%.*]] = cmpxchg i32 addrspace(1)* [[ALIGNEDADDR]], i32 [[TMP12]], i32 [[TMP11]] seq_cst seq_cst
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { i32, i1 } [[TMP13]], 0
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { i32, i1 } [[TMP13]], 1
+; CHECK-NEXT: br i1 [[TMP15]], label [[PARTWORD_CMPXCHG_END:%.*]], label [[PARTWORD_CMPXCHG_FAILURE]]
+; CHECK: partword.cmpxchg.failure:
+; CHECK-NEXT: [[TMP16]] = and i32 [[TMP14]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP10]], [[TMP16]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[PARTWORD_CMPXCHG_LOOP]], label [[PARTWORD_CMPXCHG_END]]
+; CHECK: partword.cmpxchg.end:
+; CHECK-NEXT: [[TMP18:%.*]] = lshr i32 [[TMP14]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i8
+; CHECK-NEXT: [[TMP20:%.*]] = insertvalue { i8, i1 } undef, i8 [[TMP19]], 0
+; CHECK-NEXT: [[TMP21:%.*]] = insertvalue { i8, i1 } [[TMP20]], i1 [[TMP15]], 1
+; CHECK-NEXT: [[EXTRACT:%.*]] = extractvalue { i8, i1 } [[TMP21]], 0
+; CHECK-NEXT: ret i8 [[EXTRACT]]
+;
+  %gep = getelementptr i8, i8 addrspace(1)* %out, i64 4
+  %res = cmpxchg i8 addrspace(1)* %gep, i8 %old, i8 %in seq_cst seq_cst
+  %extract = extractvalue {i8, i1} %res, 0
+  ret i8 %extract
+}
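The test_cmpxchg_* expansions in both files implement the same part-word compare-and-swap loop: attempt the exchange on the full 32-bit word, and if it fails only because bytes outside the i8/i16 changed, fold in the freshly observed neighbour bits and retry. A sketch of what the emitted IR computes, written over std::atomic (illustrative C++, not pass code):

#include <atomic>
#include <cstdint>

// Illustrative rendering of the partword.cmpxchg.* blocks checked above.
static uint8_t partwordCmpxchg(std::atomic<uint32_t> *AlignedAddr,
                               uint32_t ShiftAmt, uint32_t Mask,
                               uint8_t Old, uint8_t In) {
  uint32_t NewShifted = uint32_t(In) << ShiftAmt;
  uint32_t OldShifted = uint32_t(Old) << ShiftAmt;
  uint32_t Neighbors = AlignedAddr->load() & ~Mask; // bytes around the i8
  while (true) {
    uint32_t Seen = Neighbors | OldShifted; // full-word expected value
    if (AlignedAddr->compare_exchange_strong(Seen, Neighbors | NewShifted))
      return uint8_t(Seen >> ShiftAmt);     // swap happened
    uint32_t SeenNeighbors = Seen & ~Mask;
    if (SeenNeighbors == Neighbors)
      return uint8_t(Seen >> ShiftAmt);     // our byte really mismatched
    Neighbors = SeenNeighbors;              // only neighbours moved; retry
  }
}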
diff --git a/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-rmw-fadd.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-rmw-fadd.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=nvptx-unknown-unknown -mcpu=sm_30 -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=nvptx-unknown-unknown -mcpu=sm_60 -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=nvptx-unknown-unknown -mcpu=sm_75 -atomic-expand %s | FileCheck %s
+
+define float @test_atomicrmw_fadd_f32_flat(float* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f32_flat(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret float [[TMP6]]
+;
+  %res = atomicrmw fadd float* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define float @test_atomicrmw_fadd_f32_global(float addrspace(1)* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f32_global(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret float [[TMP6]]
+;
+  %res = atomicrmw fadd float addrspace(1)* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define void @test_atomicrmw_fadd_f32_global_no_use(float addrspace(1)* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f32_global_no_use(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret void
+;
+  %res = atomicrmw fadd float addrspace(1)* %ptr, float %value seq_cst
+  ret void
+}
+
+define float @test_atomicrmw_fadd_f32_local(float addrspace(3)* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f32_local(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float addrspace(3)* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float addrspace(3)* [[PTR]] to i32 addrspace(3)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(3)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret float [[TMP6]]
+;
+  %res = atomicrmw fadd float addrspace(3)* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define half @test_atomicrmw_fadd_f16_flat(half* %ptr, half %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f16_flat(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw fadd half* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret half [[RES]]
+;
+  %res = atomicrmw fadd half* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define half @test_atomicrmw_fadd_f16_global(half addrspace(1)* %ptr, half %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(1)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret half [[RES]]
+;
+  %res = atomicrmw fadd half addrspace(1)* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define half @test_atomicrmw_fadd_f16_local(half addrspace(3)* %ptr, half %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f16_local(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(3)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret half [[RES]]
+;
+  %res = atomicrmw fadd half addrspace(3)* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define double @test_atomicrmw_fadd_f64_flat(double* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f64_flat(
+; CHECK-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret double [[TMP6]]
+;
+  %res = atomicrmw fadd double* %ptr, double %value seq_cst
+  ret double %res
+}
+
+define double @test_atomicrmw_fadd_f64_global(double addrspace(1)* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f64_global(
+; CHECK-NEXT: [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret double [[TMP6]]
+;
+  %res = atomicrmw fadd double addrspace(1)* %ptr, double %value seq_cst
+  ret double %res
+}
+
+define double @test_atomicrmw_fadd_f64_local(double addrspace(3)* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f64_local(
+; CHECK-NEXT: [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret double [[TMP6]]
+;
+  %res = atomicrmw fadd double addrspace(3)* %ptr, double %value seq_cst
+  ret double %res
+}
+
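As the checks show, the pass falls back to a compare-exchange loop for these f32/f64 atomicrmw operations, shuttling the value through a same-width integer via bitcasts; the f16 cases are left untouched at this stage. An illustrative C++20 rendering of the f32 loop (the f64 version is identical with 64-bit types; this is a sketch, not the pass implementation):

#include <atomic>
#include <bit>
#include <cstdint>

static float atomicFAdd(std::atomic<uint32_t> *WordPtr, float Value) {
  uint32_t Loaded = WordPtr->load(); // the plain pre-load seeds the loop
  while (true) {
    float Old = std::bit_cast<float>(Loaded);
    uint32_t Desired = std::bit_cast<uint32_t>(Old + Value); // the fadd
    // On failure, compare_exchange_strong refreshes Loaded, playing the
    // role of the phi in the atomicrmw.start block.
    if (WordPtr->compare_exchange_strong(Loaded, Desired))
      return Old; // atomicrmw yields the value before the update
  }
}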
diff --git a/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-rmw-fsub.ll b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-rmw-fsub.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-rmw-fsub.ll
@@ -0,0 +1,162 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=nvptx64-unknown-unknown -mcpu=sm_30 -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=nvptx64-unknown-unknown -mcpu=sm_75 -atomic-expand %s | FileCheck %s
+
+define float @test_atomicrmw_fsub_f32_flat(float* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f32_flat(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret float [[TMP6]]
+;
+  %res = atomicrmw fsub float* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define float @test_atomicrmw_fsub_f32_global(float addrspace(1)* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f32_global(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret float [[TMP6]]
+;
+  %res = atomicrmw fsub float addrspace(1)* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define float @test_atomicrmw_fsub_f32_local(float addrspace(3)* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f32_local(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float addrspace(3)* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float addrspace(3)* [[PTR]] to i32 addrspace(3)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(3)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret float [[TMP6]]
+;
+  %res = atomicrmw fsub float addrspace(3)* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define half @test_atomicrmw_fsub_f16_flat(half* %ptr, half %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f16_flat(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw fsub half* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret half [[RES]]
+;
+  %res = atomicrmw fsub half* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define half @test_atomicrmw_fsub_f16_global(half addrspace(1)* %ptr, half %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f16_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw fsub half addrspace(1)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret half [[RES]]
+;
+  %res = atomicrmw fsub half addrspace(1)* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define half @test_atomicrmw_fsub_f16_local(half addrspace(3)* %ptr, half %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f16_local(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw fsub half addrspace(3)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret half [[RES]]
+;
+  %res = atomicrmw fsub half addrspace(3)* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define double @test_atomicrmw_fsub_f64_flat(double* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f64_flat(
+; CHECK-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret double [[TMP6]]
+;
+  %res = atomicrmw fsub double* %ptr, double %value seq_cst
+  ret double %res
+}
+
+define double @test_atomicrmw_fsub_f64_global(double addrspace(1)* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f64_global(
+; CHECK-NEXT: [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret double [[TMP6]]
+;
+  %res = atomicrmw fsub double addrspace(1)* %ptr, double %value seq_cst
+  ret double %res
+}
+
+define double @test_atomicrmw_fsub_f64_local(double addrspace(3)* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f64_local(
+; CHECK-NEXT: [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret double [[TMP6]]
+;
+  %res = atomicrmw fsub double addrspace(3)* %ptr, double %value seq_cst
+  ret double %res
+}
diff --git a/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-rmw-nand.ll b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-rmw-nand.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/NVPTX/expand-atomic-rmw-nand.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=nvptx-unknown-unknown -S -atomic-expand %s | FileCheck %s
+; RUN: opt -mtriple=nvptx64-unknown-unknown -S -atomic-expand %s | FileCheck %s
+
+define i32 @test_atomicrmw_nand_i32_flat(i32* %ptr, i32 %value) {
+; CHECK-LABEL: @test_atomicrmw_nand_i32_flat(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw nand i32* [[PTR:%.*]], i32 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i32 [[RES]]
+;
+  %res = atomicrmw nand i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+
+define i32 @test_atomicrmw_nand_i32_global(i32 addrspace(1)* %ptr, i32 %value) {
+; CHECK-LABEL: @test_atomicrmw_nand_i32_global(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw nand i32 addrspace(1)* [[PTR:%.*]], i32 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i32 [[RES]]
+;
+  %res = atomicrmw nand i32 addrspace(1)* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+
+define i32 @test_atomicrmw_nand_i32_local(i32 addrspace(3)* %ptr, i32 %value) {
+; CHECK-LABEL: @test_atomicrmw_nand_i32_local(
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw nand i32 addrspace(3)* [[PTR:%.*]], i32 [[VALUE:%.*]] seq_cst
+; CHECK-NEXT: ret i32 [[RES]]
+;
+  %res = atomicrmw nand i32 addrspace(3)* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
diff --git a/llvm/test/Transforms/AtomicExpand/NVPTX/lit.local.cfg b/llvm/test/Transforms/AtomicExpand/NVPTX/lit.local.cfg
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/NVPTX/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'NVPTX' in config.root.targets:
+    config.unsupported = True
diff --git a/llvm/test/Transforms/AtomicExpand/NVPTX/unaligned-atomic.ll b/llvm/test/Transforms/AtomicExpand/NVPTX/unaligned-atomic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/NVPTX/unaligned-atomic.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=nvptx64-unknown-unknown -atomic-expand %s | FileCheck -check-prefix=CHECK %s
+
+define i32 @atomic_load_global_align1(i32 addrspace(1)* %ptr) {
+; CHECK-LABEL: @atomic_load_global_align1(
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[PTR:%.*]] to i8 addrspace(1)*
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(1)* [[TMP1]] to i8*
+; CHECK-NEXT: [[TMP3:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP3]] to i8*
+; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP4]])
+; CHECK-NEXT: call void @__atomic_load(i64 4, i8* [[TMP2]], i8* [[TMP4]], i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP4]])
+; CHECK-NEXT: ret i32 [[TMP5]]
+;
+  %val = load atomic i32, i32 addrspace(1)* %ptr seq_cst, align 1
+  ret i32 %val
+}
+
+define void @atomic_store_global_align1(i32 addrspace(1)* %ptr, i32 %val) {
+; CHECK-LABEL: @atomic_store_global_align1(
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[PTR:%.*]] to i8 addrspace(1)*
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(1)* [[TMP1]] to i8*
+; CHECK-NEXT: [[TMP3:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP3]] to i8*
+; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP4]])
+; CHECK-NEXT: store i32 [[VAL:%.*]], i32* [[TMP3]], align 4
+; CHECK-NEXT: call void @__atomic_store(i64 4, i8* [[TMP2]], i8* [[TMP4]], i32 0)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP4]])
+; CHECK-NEXT: ret void
+;
+  store atomic i32 %val, i32 addrspace(1)* %ptr monotonic, align 1
+  ret void
+}
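The unaligned cases exercise setSupportsUnalignedAtomics(false): the pass addrspacecasts the global pointer to the generic address space and calls the generic libatomic entry points, passing the __ATOMIC_* ordering constants (5 for seq_cst on the load, 0 for relaxed/monotonic on the store). For reference, the libcall shapes being matched (declarations follow GNU libatomic's documented generic signatures; the wrapper is an illustrative restatement of atomic_load_global_align1, not generated code):

#include <cstddef>

extern "C" void __atomic_load(size_t Size, void *Ptr, void *Ret, int Order);
extern "C" void __atomic_store(size_t Size, void *Ptr, void *Val, int Order);

static int loadAlign1(void *Ptr) {
  int Tmp; // corresponds to the alloca in the checks
  __atomic_load(sizeof(int), Ptr, &Tmp, 5 /* __ATOMIC_SEQ_CST */);
  return Tmp;
}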