diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -221,6 +221,31 @@
       }
     }
 
+    if (LI && TLI->shouldCastAtomicLoadInIR(LI) ==
+                  TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
+      I = LI = convertAtomicLoadToIntegerType(LI);
+      MadeChange = true;
+    } else if (SI &&
+               TLI->shouldCastAtomicStoreInIR(SI) ==
+                   TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
+      I = SI = convertAtomicStoreToIntegerType(SI);
+      MadeChange = true;
+    } else if (RMWI &&
+               TLI->shouldCastAtomicRMWIInIR(RMWI) ==
+                   TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
+      I = RMWI = convertAtomicXchgToIntegerType(RMWI);
+      MadeChange = true;
+    } else if (CASI) {
+      // TODO: when we're ready to make the change at the IR level, we can
+      // extend convertCmpXchgToInteger for floating point too.
+      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
+        // TODO: add a TLI hook to control this so that each target can
+        // convert to lowering the original type one at a time.
+        I = CASI = convertCmpXchgToIntegerType(CASI);
+        MadeChange = true;
+      }
+    }
+
     if (TLI->shouldInsertFencesForAtomic(I)) {
       auto FenceOrdering = AtomicOrdering::Monotonic;
       if (LI && isAcquireOrStronger(LI->getOrdering())) {
@@ -253,31 +278,11 @@
       }
     }
 
-    if (LI) {
-      if (TLI->shouldCastAtomicLoadInIR(LI) ==
-          TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
-        // TODO: add a TLI hook to control this so that each target can
-        // convert to lowering the original type one at a time.
-        LI = convertAtomicLoadToIntegerType(LI);
-        assert(LI->getType()->isIntegerTy() && "invariant broken");
-        MadeChange = true;
-      }
-
+    if (LI)
       MadeChange |= tryExpandAtomicLoad(LI);
-    } else if (SI) {
-      if (TLI->shouldCastAtomicStoreInIR(SI) ==
-          TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
-        // TODO: add a TLI hook to control this so that each target can
-        // convert to lowering the original type one at a time.
-        SI = convertAtomicStoreToIntegerType(SI);
-        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
-               "invariant broken");
-        MadeChange = true;
-      }
-
-      if (tryExpandAtomicStore(SI))
-        MadeChange = true;
-    } else if (RMWI) {
+    else if (SI)
+      MadeChange |= tryExpandAtomicStore(SI);
+    else if (RMWI) {
       // There are two different ways of expanding RMW instructions:
       // - into a load if it is idempotent
       // - into a Cmpxchg/LL-SC loop otherwise
@@ -287,15 +292,6 @@
         MadeChange = true;
       } else {
         AtomicRMWInst::BinOp Op = RMWI->getOperation();
-        if (TLI->shouldCastAtomicRMWIInIR(RMWI) ==
-            TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
-          // TODO: add a TLI hook to control this so that each target can
-          // convert to lowering the original type one at a time.
-          RMWI = convertAtomicXchgToIntegerType(RMWI);
-          assert(RMWI->getValOperand()->getType()->isIntegerTy() &&
-                 "invariant broken");
-          MadeChange = true;
-        }
         unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
         unsigned ValueSize = getAtomicOpSize(RMWI);
         if (ValueSize < MinCASSize &&
@@ -307,22 +303,8 @@
         MadeChange |= tryExpandAtomicRMW(RMWI);
       }
-    } else if (CASI) {
-      // TODO: when we're ready to make the change at the IR level, we can
-      // extend convertCmpXchgToInteger for floating point too.
-      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
-             "unimplemented - floating point not legal at IR level");
-      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
-        // TODO: add a TLI hook to control this so that each target can
-        // convert to lowering the original type one at a time.
-        CASI = convertCmpXchgToIntegerType(CASI);
-        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
-               "invariant broken");
-        MadeChange = true;
-      }
-
+    } else if (CASI)
       MadeChange |= tryExpandAtomicCmpXchg(CASI);
-    }
   }
   return MadeChange;
 }
 
diff --git a/llvm/test/CodeGen/PowerPC/cfence-double.ll b/llvm/test/CodeGen/PowerPC/cfence-double.ll
--- a/llvm/test/CodeGen/PowerPC/cfence-double.ll
+++ b/llvm/test/CodeGen/PowerPC/cfence-double.ll
@@ -1,12 +1,28 @@
-; REQUIRES: asserts
-; RUN: not --crash llc -opaque-pointers -mtriple=powerpc64le-unknown-unknown \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -opaque-pointers -mtriple=powerpc64le-unknown-unknown \
+; RUN:   < %s 2>&1 | FileCheck --check-prefix=CHECK-LE %s
+; RUN: llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
-; RUN:   < %s 2>&1 | FileCheck %s
-
-; CHECK: Assertion{{.*}}VT.isInteger() && Operand.getValueType().isInteger() && "Invalid ANY_EXTEND!"
 define double @foo(double* %dp) {
+; CHECK-LE-LABEL: foo:
+; CHECK-LE:       # %bb.0: # %entry
+; CHECK-LE-NEXT:    ld 3, 0(3)
+; CHECK-LE-NEXT:    cmpd 7, 3, 3
+; CHECK-LE-NEXT:    mtfprd 1, 3
+; CHECK-LE-NEXT:    bne- 7, .+4
+; CHECK-LE-NEXT:    isync
+; CHECK-LE-NEXT:    blr
+;
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ld 3, 0(3)
+; CHECK-NEXT:    cmpd 7, 3, 3
+; CHECK-NEXT:    bne- 7, .+4
+; CHECK-NEXT:    isync
+; CHECK-NEXT:    std 3, -8(1)
+; CHECK-NEXT:    lfd 1, -8(1)
+; CHECK-NEXT:    blr
 entry:
   %0 = load atomic double, double* %dp acquire, align 8
   ret double %0
 }
diff --git a/llvm/test/CodeGen/PowerPC/cfence-float.ll b/llvm/test/CodeGen/PowerPC/cfence-float.ll
--- a/llvm/test/CodeGen/PowerPC/cfence-float.ll
+++ b/llvm/test/CodeGen/PowerPC/cfence-float.ll
@@ -1,12 +1,30 @@
-; REQUIRES: asserts
-; RUN: not --crash llc -opaque-pointers -mtriple=powerpc64le-unknown-unknown \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -opaque-pointers -mtriple=powerpc64le-unknown-unknown \
+; RUN:   < %s 2>&1 | FileCheck --check-prefix=CHECK-LE %s
+; RUN: llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
-; RUN:   < %s 2>&1 | FileCheck %s
-
-; CHECK: Assertion{{.*}}VT.isInteger() && Operand.getValueType().isInteger() && "Invalid ANY_EXTEND!"
 define float @bar(float* %fp) {
+; CHECK-LE-LABEL: bar:
+; CHECK-LE:       # %bb.0: # %entry
+; CHECK-LE-NEXT:    lwz 3, 0(3)
+; CHECK-LE-NEXT:    mtfprd 0, 3
+; CHECK-LE-NEXT:    cmpd 7, 3, 3
+; CHECK-LE-NEXT:    xxsldwi 0, 0, 0, 1
+; CHECK-LE-NEXT:    bne- 7, .+4
+; CHECK-LE-NEXT:    isync
+; CHECK-LE-NEXT:    xscvspdpn 1, 0
+; CHECK-LE-NEXT:    blr
+;
+; CHECK-LABEL: bar:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lwz 3, 0(3)
+; CHECK-NEXT:    cmpd 7, 3, 3
+; CHECK-NEXT:    bne- 7, .+4
+; CHECK-NEXT:    isync
+; CHECK-NEXT:    stw 3, -4(1)
+; CHECK-NEXT:    lfs 1, -4(1)
+; CHECK-NEXT:    blr
 entry:
   %0 = load atomic float, float* %fp acquire, align 4
   ret float %0
 }
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
@@ -8,8 +8,8 @@
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i64, ptr [[DP:%.*]] monotonic, align 8
+; CHECK-NEXT:    call void @llvm.ppc.cfence.i64(i64 [[TMP0]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[TMP0]] to double
-; CHECK-NEXT:    call void @llvm.ppc.cfence.f64(double [[TMP1]])
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
 entry:
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
@@ -8,8 +8,8 @@
 ; CHECK-LABEL: @bar(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i32, ptr [[FP:%.*]] monotonic, align 4
+; CHECK-NEXT:    call void @llvm.ppc.cfence.i32(i32 [[TMP0]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 [[TMP0]] to float
-; CHECK-NEXT:    call void @llvm.ppc.cfence.f32(float [[TMP1]])
 ; CHECK-NEXT:    ret float [[TMP1]]
 ;
 entry:
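
Note on the expansion order, as an illustration rather than part of the patch: with the CastToInteger conversions hoisted above the fence-insertion step, an atomic FP load is rewritten as an integer load before the PowerPC trailing fence is emitted, so llvm.ppc.cfence is instantiated at an integer type and instruction selection no longer trips the "Invalid ANY_EXTEND!" assertion. Below is a minimal standalone sketch of the post-AtomicExpand IR for the float case, assembled from the CHECK lines of the updated cfence-float.ll transform test above (opaque-pointer form; function and argument names taken from that test):

define float @bar(ptr %fp) {
entry:
  ; the acquire FP load has been rewritten as a monotonic integer load,
  ; with the acquire ordering enforced by the trailing fence instead
  %0 = load atomic i32, ptr %fp monotonic, align 4
  ; the trailing fence intrinsic is now emitted on the i32 value ...
  call void @llvm.ppc.cfence.i32(i32 %0)
  ; ... and the bitcast back to float happens only after the fence
  %1 = bitcast i32 %0 to float
  ret float %1
}

declare void @llvm.ppc.cfence.i32(i32)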