diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -43,7 +43,7 @@
     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
           EvaluationKind(TEK_Scalar), UseLibcall(true) {
-      assert(!lvalue.isGlobalReg());
+      assert(!lvalue.isGlobalReg() && !lvalue.isConditional());
       ASTContext &C = CGF.getContext();
       if (lvalue.isSimple()) {
         AtomicTy = lvalue.getType();
@@ -1522,20 +1522,35 @@
   return Load;
 }

+/// Determine whether some condition is true for all conditional cases in the
+/// given LValue.
+template <typename Fn>
+static bool forAllConditionals(LValue LV, Fn F) {
+  if (LV.isConditional()) {
+    return forAllConditionals(LV.getConditionalValue(0), F) &&
+           forAllConditionals(LV.getConditionalValue(1), F);
+  }
+  return F(LV);
+}
+
 /// An LValue is a candidate for having its loads and stores be made atomic if
 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
 /// performing such an operation can be performed without a libcall.
 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
   if (!CGM.getCodeGenOpts().MSVolatile) return false;
-  AtomicInfo AI(*this, LV);
   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
-  // An atomic is inline if we don't need to use a libcall.
-  bool AtomicIsInline = !AI.shouldUseLibcall();
+  if (!IsVolatile)
+    return false;
   // MSVC doesn't seem to do this for types wider than a pointer.
   if (getContext().getTypeSize(LV.getType()) >
       getContext().getTypeSize(getContext().getIntPtrType()))
     return false;
-  return IsVolatile && AtomicIsInline;
+  // An atomic is inline if we don't need to use a libcall.
+  bool AtomicIsInline = forAllConditionals(LV, [&](LValue LV) {
+    AtomicInfo Atomics(*this, LV);
+    return !Atomics.shouldUseLibcall();
+  });
+  return AtomicIsInline;
 }

 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
@@ -1587,6 +1602,12 @@
 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                        llvm::AtomicOrdering AO, bool IsVolatile,
                                        AggValueSlot resultSlot) {
+  if (src.isConditional()) {
+    return EmitBranchOnConditionalLValue(src, [&](LValue LV) {
+      return EmitAtomicLoad(LV, loc, AO, IsVolatile, resultSlot);
+    });
+  }
+
   AtomicInfo Atomics(*this, src);
   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                 IsVolatile);
@@ -1989,6 +2010,12 @@
          rvalue.getAggregateAddress().getElementType() ==
              dest.getAddress(*this).getElementType());

+  if (dest.isConditional()) {
+    return EmitBranchOnConditionalLValue(dest, [&](LValue LV) {
+      return EmitAtomicStore(rvalue, LV, AO, IsVolatile, isInit);
+    });
+  }
+
   AtomicInfo atomics(*this, dest);
   LValue LVal = atomics.getAtomicLValue();

@@ -2062,6 +2089,14 @@
   assert(!Desired.isAggregate() ||
          Desired.getAggregateAddress().getElementType() ==
              Obj.getAddress(*this).getElementType());
+
+  if (Obj.isConditional()) {
+    return EmitBranchOnConditionalLValue(Obj, [&](LValue LV) {
+      return EmitAtomicCompareExchange(LV, Expected, Desired, Loc, Success,
+                                       Failure, IsWeak, Slot);
+    });
+  }
+
   AtomicInfo Atomics(*this, Obj);

   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
@@ -2070,7 +2105,24 @@

 void CodeGenFunction::EmitAtomicUpdate(
     LValue LVal, llvm::AtomicOrdering AO,
-    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
+    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile,
+    SourceLocation Loc) {
+  if (LVal.isConditional()) {
+    // FIXME: We can mostly EmitBranchOnConditionalLValue here, but there are
+    // two problems with that:
+    // 1) That can result in emitting the same expression twice, which might
+    //    not be correct for some expression forms.
+    // 2) UpdateOp might expect to be called only once. That happens for the
+    //    various OpenMP atomic update expressions of the form
+    //      { v = x; x = f(x); }
+    //    which want to capture a single previous value of x.
+    CGM.Error(Loc, "cannot compile this OpenMP atomic bitfield conditional "
+                   "lvalue update yet");
+    // Recover by pretending we only saw the first case in the conditional.
+    while (LVal.isConditional())
+      LVal = LVal.getConditionalValue(0);
+  }
+
   AtomicInfo Atomics(*this, LVal);
   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
 }
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1244,7 +1244,7 @@
     LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
   else
     LV = EmitLValue(E);
-  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
+  if (!isa<DeclRefExpr>(E) && LV.isSimple()) {
     SanitizerSet SkippedChecks;
     if (const auto *ME = dyn_cast<MemberExpr>(E)) {
       bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
@@ -1914,8 +1914,16 @@
         Builder.CreateExtractElement(Load, LV.getMatrixIdx(), "matrixext"));
   }

-  assert(LV.isBitField() && "Unknown LValue type!");
-  return EmitLoadOfBitfieldLValue(LV, Loc);
+  if (LV.isBitField())
+    return EmitLoadOfBitfieldLValue(LV, Loc);
+
+  if (LV.isConditional()) {
+    return EmitBranchOnConditionalLValue(LV, [&](LValue LV) {
+      return EmitLoadOfLValue(LV, Loc);
+    });
+  }
+
+  llvm_unreachable("unknown lvalue kind");
 }

 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
@@ -2053,8 +2061,16 @@
     return;
   }

-  assert(Dst.isBitField() && "Unknown LValue type");
-  return EmitStoreThroughBitfieldLValue(Src, Dst);
+  if (Dst.isBitField())
+    return EmitStoreThroughBitfieldLValue(Src, Dst);
+
+  if (Dst.isConditional()) {
+    return EmitBranchOnConditionalLValue(Dst, [&](LValue LV) {
+      return EmitStoreThroughLValue(Src, LV, isInit);
+    });
+  }
+
+  llvm_unreachable("unknown lvalue type");
 }

 // There's special magic for assigning into an ARC-qualified l-value.
@@ -4427,6 +4443,193 @@
   return CGF.EmitLValue(Operand);
 }

+LValue CodeGenFunction::MakeConditionalLValue(llvm::Value *Sel, LValue A,
+                                              LValue B) {
+  assert(getContext().hasSameUnqualifiedType(A.getType(), B.getType()) &&
+         "conditional between lvalues of different types");
+
+  TBAAAccessInfo TBAAInfo =
+      CGM.mergeTBAAInfoForConditionalOperator(A.getTBAAInfo(), B.getTBAAInfo());
+
+  auto Vals = std::make_unique<ConditionalLValueStorageType>(A, B);
+  LValue Result = LValue::MakeConditional(Sel, Vals->Values, TBAAInfo);
+  ConditionalLValueStorage.push_back(std::move(Vals));
+  return Result;
+}
+
+/// Given an LValue that is usable in predecessor \p Pred of the current block,
+/// get a corresponding LValue that is usable in this block. Replace the value
+/// with 'undef' if we came from the other predecessor \p OtherPred.
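+///
+/// For example, a bitfield lvalue whose base address was computed in \p Pred
+/// becomes a bitfield lvalue whose base address is a phi that takes the
+/// original address on the edge from \p Pred and 'undef' on the edge from
+/// \p OtherPred.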
+static LValue getLValueOrUndef(CodeGenFunction &CGF, LValue LVal,
+                               llvm::BasicBlock *Pred,
+                               llvm::BasicBlock *OtherPred) {
+  auto MakeValuePHI = [&](llvm::Value *V) {
+    llvm::PHINode *NewV = CGF.Builder.CreatePHI(V->getType(), 2);
+    NewV->addIncoming(V, Pred);
+    NewV->addIncoming(llvm::UndefValue::get(V->getType()), OtherPred);
+    return NewV;
+  };
+
+  auto MakeAddressPHI = [&](Address Addr) {
+    if (!Addr.isValid())
+      return Addr;
+    return Address(MakeValuePHI(Addr.getPointer()), Addr.getAlignment());
+  };
+
+  if (LVal.isSimple())
+    return LValue::MakeAddr(MakeAddressPHI(LVal.getAddress(CGF)),
+                            LVal.getType(), CGF.getContext(),
+                            LVal.getBaseInfo(), LVal.getTBAAInfo());
+  if (LVal.isVectorElt()) {
+    Address A = MakeAddressPHI(LVal.getVectorAddress());
+    return LValue::MakeVectorElt(A, MakeValuePHI(LVal.getVectorIdx()),
+                                 LVal.getType(), LVal.getBaseInfo(),
+                                 LVal.getTBAAInfo());
+  }
+  if (LVal.isExtVectorElt())
+    return LValue::MakeExtVectorElt(MakeAddressPHI(LVal.getExtVectorAddress()),
+                                    LVal.getExtVectorElts(), LVal.getType(),
+                                    LVal.getBaseInfo(), LVal.getTBAAInfo());
+  if (LVal.isBitField())
+    return LValue::MakeBitfield(MakeAddressPHI(LVal.getBitFieldAddress()),
+                                LVal.getBitFieldInfo(), LVal.getType(),
+                                LVal.getBaseInfo(), LVal.getTBAAInfo());
+  if (LVal.isGlobalReg())
+    return LValue::MakeGlobalReg(MakeAddressPHI(LVal.getGlobalRegAddress()),
+                                 LVal.getType());
+  if (LVal.isMatrixElt()) {
+    Address A = MakeAddressPHI(LVal.getMatrixAddress());
+    return LValue::MakeMatrixElt(A, MakeValuePHI(LVal.getMatrixIdx()),
+                                 LVal.getType(), LVal.getBaseInfo(),
+                                 LVal.getTBAAInfo());
+  }
+
+  if (LVal.isConditional()) {
+    LValue LHS =
+        getLValueOrUndef(CGF, LVal.getConditionalValue(0), Pred, OtherPred);
+    LValue RHS =
+        getLValueOrUndef(CGF, LVal.getConditionalValue(1), Pred, OtherPred);
+    return CGF.MakeConditionalLValue(MakeValuePHI(LVal.getCondition()),
+                                     LHS, RHS);
+  }
+
+  llvm_unreachable("unknown lvalue kind");
+}
+
+LValue CodeGenFunction::EmitPHI(PHIValue<LValue> A, PHIValue<LValue> B) {
+  // Easy case: both lvalues are simple. In this case, we can just emit a
+  // single phi and produce a simple lvalue result.
+  if (A.Value.isSimple() && B.Value.isSimple()) {
+    llvm::PHINode *Phi = Builder.CreatePHI(
+        A.Value.getPointer(*this)->getType(), 2, "cond-lvalue");
+    Phi->addIncoming(A.Value.getPointer(*this), A.Pred);
+    Phi->addIncoming(B.Value.getPointer(*this), B.Pred);
+
+    CharUnits Alignment =
+        std::min(A.Value.getAlignment(), B.Value.getAlignment());
+    AlignmentSource AlignSource =
+        std::max(A.Value.getBaseInfo().getAlignmentSource(),
+                 B.Value.getBaseInfo().getAlignmentSource());
+    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
+        A.Value.getTBAAInfo(), B.Value.getTBAAInfo());
+
+    Address Result(Phi, Alignment);
+    return MakeAddrLValue(Result, A.Value.getType(),
+                          LValueBaseInfo(AlignSource), TBAAInfo);
+  }
+
+  // Hard case: create a conditional lvalue merging the cases.
+  llvm::PHINode *Sel = Builder.CreatePHI(
+      llvm::Type::getInt1Ty(getLLVMContext()), 2, "cond.lval.sel");
+  Sel->addIncoming(llvm::ConstantInt::getFalse(getLLVMContext()), A.Pred);
+  Sel->addIncoming(llvm::ConstantInt::getTrue(getLLVMContext()), B.Pred);
+
+  // Make sure both LValues are usable in the continuation block.
+  LValue AVal = getLValueOrUndef(*this, A.Value, A.Pred, B.Pred);
+  LValue BVal = getLValueOrUndef(*this, B.Value, B.Pred, A.Pred);
+  return MakeConditionalLValue(Sel, AVal, BVal);
+}
+
+/// Apply 'Get' to each value in a collection and build a PHI node from the
+/// resulting values. Get should be callable as if of type
+/// 'llvm::Value *Get(T)'.
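+/// For example, a list of complex RValues can be merged by calling this
+/// twice: once projecting each value to its real component and once to its
+/// imaginary component.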
+///
+/// The projection may produce null pointers for some inputs. If all inputs
+/// project to null pointers, a null pointer is returned. Otherwise, null
+/// inputs are replaced by undef values.
+template <typename T, typename Fn>
+static llvm::PHINode *makeProjectedPHI(CodeGenFunction &CGF,
+                                       ArrayRef<PHIValue<T>> Values, Fn Get) {
+  llvm::PHINode *Phi = nullptr;
+  for (auto &PhiV : Values) {
+    llvm::Value *V = Get(PhiV.Value);
+    if (V) {
+      if (!Phi) {
+        Phi = CGF.Builder.CreatePHI(V->getType(), Values.size());
+        for (auto &PrevPhiV : Values) {
+          if (PrevPhiV.Pred == PhiV.Pred)
+            break;
+          Phi->addIncoming(llvm::UndefValue::get(V->getType()), PrevPhiV.Pred);
+        }
+      }
+      Phi->addIncoming(V, PhiV.Pred);
+    } else if (Phi) {
+      Phi->addIncoming(llvm::UndefValue::get(Phi->getType()), PhiV.Pred);
+    }
+  }
+  return Phi;
+}
+
+llvm::Value *
+CodeGenFunction::EmitPHI(ArrayRef<PHIValue<llvm::Value *>> Values) {
+  return makeProjectedPHI(*this, Values, [](llvm::Value *V) { return V; });
+}
+
+RValue CodeGenFunction::EmitPHI(ArrayRef<PHIValue<RValue>> Values) {
+  assert(!Values.empty() && "no values to select between");
+  RValue First = Values.front().Value;
+
+  if (First.isScalar())
+    return RValue::get(makeProjectedPHI(
+        *this, Values, [](RValue V) { return V.getScalarVal(); }));
+
+  if (First.isComplex()) {
+    llvm::Value *Real = makeProjectedPHI(
+        *this, Values, [](RValue V) { return V.getComplexVal().first; });
+    llvm::Value *Imag = makeProjectedPHI(
+        *this, Values, [](RValue V) { return V.getComplexVal().second; });
+    return RValue::getComplex(Real, Imag);
+  }
+
+  if (First.isAggregate()) {
+    llvm::SmallVector<PHIValue<Address>, 2> Addresses;
+    bool Volatile = false;
+    for (auto &V : Values) {
+      Addresses.push_back({V.Value.getAggregateAddress(), V.Pred});
+      Volatile |= V.Value.isVolatileQualified();
+    }
+    Address Addr = EmitPHI(Addresses);
+    return RValue::getAggregate(Addr, Volatile);
+  }
+
+  llvm_unreachable("unknown rvalue kind");
+}
+
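+/// Merge addresses from multiple predecessors. Invalid inputs contribute
+/// 'undef' incoming values; the result uses the smallest alignment of the
+/// valid inputs, and is invalid only if every input is invalid.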
+Address CodeGenFunction::EmitPHI(ArrayRef<PHIValue<Address>> Values) {
+  CharUnits Alignment = CharUnits::Zero();
+  llvm::Value *Ptr =
+      makeProjectedPHI(*this, Values, [&](Address A) -> llvm::Value * {
+        if (!A.isValid())
+          return nullptr;
+        Alignment = Alignment.isZero()
+                        ? A.getAlignment()
+                        : std::min(Alignment, A.getAlignment());
+        return A.getPointer();
+      });
+  if (!Ptr)
+    return Address::invalid();
+  assert(!Alignment.isZero() && "valid address had zero alignment?");
+  return Address(Ptr, Alignment);
+}
+
 LValue CodeGenFunction::
 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
   if (!expr->isGLValue()) {
@@ -4477,9 +4680,6 @@
       EmitLValueOrThrowExpression(*this, expr->getTrueExpr());
   eval.end(*this);

-  if (lhs && !lhs->isSimple())
-    return EmitUnsupportedLValue(expr, "conditional operator");
-
   lhsBlock = Builder.GetInsertBlock();
   if (lhs)
     Builder.CreateBr(contBlock);
@@ -4490,30 +4690,16 @@
   Optional<LValue> rhs =
       EmitLValueOrThrowExpression(*this, expr->getFalseExpr());
   eval.end(*this);
-  if (rhs && !rhs->isSimple())
-    return EmitUnsupportedLValue(expr, "conditional operator");
   rhsBlock = Builder.GetInsertBlock();

   EmitBlock(contBlock);

-  if (lhs && rhs) {
-    llvm::PHINode *phi =
-        Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue");
-    phi->addIncoming(lhs->getPointer(*this), lhsBlock);
-    phi->addIncoming(rhs->getPointer(*this), rhsBlock);
-    Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
-    AlignmentSource alignSource =
-        std::max(lhs->getBaseInfo().getAlignmentSource(),
-                 rhs->getBaseInfo().getAlignmentSource());
-    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
-        lhs->getTBAAInfo(), rhs->getTBAAInfo());
-    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
-                          TBAAInfo);
-  } else {
-    assert((lhs || rhs) &&
-           "both operands of glvalue conditional are throw-expressions?");
-    return lhs ? *lhs : *rhs;
-  }
+  if (lhs && rhs)
+    return EmitPHI({*lhs, lhsBlock}, {*rhs, rhsBlock});
+
+  assert((lhs || rhs) &&
+         "both operands of glvalue conditional are throw-expressions?");
+  return lhs ? *lhs : *rhs;
 }

 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -5046,7 +5046,8 @@
       EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
     } else {
       // Perform compare-and-swap procedure.
-      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
+      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified(),
+                       Loc);
     }
   }
   return Res;
 }
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -159,6 +159,12 @@
   void mergeForCast(const LValueBaseInfo &Info) {
     setAlignmentSource(Info.getAlignmentSource());
   }
+  static LValueBaseInfo mergeForConditional(LValueBaseInfo A,
+                                            LValueBaseInfo B) {
+    AlignmentSource Weakest = AlignmentSource(
+        std::max(unsigned(A.AlignSource), unsigned(B.AlignSource)));
+    return LValueBaseInfo(Weakest);
+  }
 };

 /// LValue - This represents an lvalue references.  Because C/C++ allow
@@ -171,7 +177,8 @@
     BitField,     // This is a bitfield l-value, use getBitfield*.
     ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
     GlobalReg,    // This is a register l-value, use getGlobalReg()
-    MatrixElt     // This is a matrix element, use getVector*
+    MatrixElt,    // This is a matrix element, use getVector*
+    Conditional   // This is a conditional involving a non-simple lvalue.
   } LVType;

   llvm::Value *V;

@@ -185,6 +192,10 @@

     // BitField start bit and size
     const CGBitFieldInfo *BitFieldInfo;
+
+    // For a conditional lvalue, the two possible underlying lvalues.
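+    // The pointed-to array is owned by the enclosing CodeGenFunction's
+    // ConditionalLValueStorage.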
+    // V is an i1 indicating which one of these is selected.
+    LValue (*CondVals)[2];
   };

   QualType Type;
@@ -256,6 +267,7 @@
   bool isExtVectorElt() const { return LVType == ExtVectorElt; }
   bool isGlobalReg() const { return LVType == GlobalReg; }
   bool isMatrixElt() const { return LVType == MatrixElt; }
+  bool isConditional() const { return LVType == Conditional; }

   bool isVolatileQualified() const { return Quals.hasVolatile(); }
   bool isRestrictQualified() const { return Quals.hasRestrict(); }
@@ -386,6 +398,21 @@
   // global register lvalue
   llvm::Value *getGlobalReg() const { assert(isGlobalReg()); return V; }
+  Address getGlobalRegAddress() const {
+    return Address(getGlobalReg(), getAlignment());
+  }
+
+  // conditional lvalue
+  llvm::Value *getCondition() const {
+    assert(isConditional());
+    return V;
+  }
+
+  LValue getConditionalValue(bool Which) const {
+    assert(isConditional());
+    return (*CondVals)[Which];
+  }
+
   static LValue MakeAddr(Address address, QualType type, ASTContext &Context,
                          LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
     Qualifiers qs = type.getQualifiers();
@@ -462,6 +489,23 @@
     return R;
   }

+  static LValue MakeConditional(llvm::Value *Cond, LValue (&Vals)[2],
+                                TBAAAccessInfo TBAAInfo) {
+    LValue R;
+    R.LVType = Conditional;
+    R.V = Cond;
+    R.CondVals = &Vals;
+    // The properties of a conditional LValue should generally not matter,
+    // because any use should generate a branch and handle the two cases
+    // independently.
+    R.Initialize(Vals[0].getType(), Vals[0].getQuals(),
+                 std::min(Vals[0].getAlignment(), Vals[1].getAlignment()),
+                 LValueBaseInfo::mergeForConditional(Vals[0].getBaseInfo(),
+                                                     Vals[1].getBaseInfo()),
+                 TBAAInfo);
+    return R;
+  }
+
   RValue asAggregateRValue(CodeGenFunction &CGF) const {
     return RValue::getAggregate(getAddress(CGF), isVolatileQualified());
   }
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -229,6 +229,24 @@
   }
 };

+/// An input to a PHI node.
+template <typename T> struct PHIValue {
+  T Value;
+  llvm::BasicBlock *Pred;
+
+  template <typename Fn> static PHIValue make(llvm::BasicBlock *Pred, Fn F) {
+    return {F(), Pred};
+  }
+};
+template <> struct PHIValue<void> {
+  llvm::BasicBlock *Pred;
+
+  template <typename Fn> static PHIValue make(llvm::BasicBlock *Pred, Fn F) {
+    F();
+    return {Pred};
+  }
+};
+
 /// CodeGenFunction - This class organizes the per-function state that is used
 /// while generating LLVM code.
 class CodeGenFunction : public CodeGenTypeCache {
@@ -1455,6 +1473,13 @@
   // enter/leave scopes.
   llvm::DenseMap<const Expr *, llvm::Value *> VLASizeMap;

+  struct ConditionalLValueStorageType {
+    ConditionalLValueStorageType(LValue A, LValue B) : Values{A, B} {}
+    LValue Values[2];
+  };
+  std::vector<std::unique_ptr<ConditionalLValueStorageType>>
+      ConditionalLValueStorage;
+
   /// A block containing a single 'unreachable' instruction.  Created
   /// lazily by getUnreachableBlock().
   llvm::BasicBlock *UnreachableBlock = nullptr;
@@ -2481,6 +2506,62 @@
                               AggValueSlot::DoesNotOverlap);
   }

+  /// Make a conditional lvalue that selects between the two given LValues.
+  LValue MakeConditionalLValue(llvm::Value *Sel, LValue A, LValue B);
+
+  /// Emit a PHI between values.
+  llvm::Value *EmitPHI(llvm::ArrayRef<PHIValue<llvm::Value *>> Values);
+  llvm::Value *EmitPHI(PHIValue<llvm::Value *> A, PHIValue<llvm::Value *> B) {
+    return EmitPHI({A, B});
+  }
+
+  LValue EmitPHI(PHIValue<LValue> A, PHIValue<LValue> B);
+
+  RValue EmitPHI(llvm::ArrayRef<PHIValue<RValue>> Values);
+  RValue EmitPHI(PHIValue<RValue> A, PHIValue<RValue> B) {
+    return EmitPHI({A, B});
+  }
+
+  Address EmitPHI(llvm::ArrayRef<PHIValue<Address>> Values);
+  Address EmitPHI(PHIValue<Address> A, PHIValue<Address> B) {
+    return EmitPHI({A, B});
+  }
+
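+  /// Merge a pair of values componentwise, for results that are represented
+  /// as two independently mergeable values.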
+  template <typename T, typename U>
+  std::pair<T, U> EmitPHI(PHIValue<std::pair<T, U>> A,
+                          PHIValue<std::pair<T, U>> B) {
+    return {EmitPHI({A.Value.first, A.Pred}, {B.Value.first, B.Pred}),
+            EmitPHI({A.Value.second, A.Pred}, {B.Value.second, B.Pred})};
+  }
+
+  void EmitPHI(PHIValue<void> A, PHIValue<void> B) {}
+
+  /// Emit an operation on both possible cases of a conditional lvalue, and
+  /// merge the results.
+  template <typename Fn>
+  auto EmitBranchOnConditionalLValue(LValue CondLV, Fn F)
+      -> decltype(F(CondLV)) {
+    assert(CondLV.isConditional() && "not a conditional LValue");
+    using Result = decltype(F(CondLV));
+
+    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.lval.lhs");
+    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.lval.rhs");
+    llvm::BasicBlock *ContBlock = createBasicBlock("cond.lval.cont");
+    Builder.CreateCondBr(CondLV.getCondition(), RHSBlock, LHSBlock);
+
+    EmitBlock(LHSBlock);
+    PHIValue<Result> LHSValue = PHIValue<Result>::make(
+        LHSBlock, [&] { return F(CondLV.getConditionalValue(0)); });
+    EmitBranch(ContBlock);
+
+    EmitBlock(RHSBlock);
+    PHIValue<Result> RHSValue = PHIValue<Result>::make(
+        RHSBlock, [&] { return F(CondLV.getConditionalValue(1)); });
+    EmitBranch(ContBlock);
+
+    EmitBlock(ContBlock);
+    return EmitPHI(LHSValue, RHSValue);
+  }
+
   /// Emit a cast to void* in the appropriate address space.
   llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
@@ -3631,7 +3712,7 @@
   void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
                         const llvm::function_ref<RValue(RValue)> &UpdateOp,
-                        bool IsVolatile);
+                        bool IsVolatile, SourceLocation Loc);

   /// EmitToMemory - Change a scalar value from its value
   /// representation to its in-memory representation.
diff --git a/clang/test/CodeGenCXX/conditional-expr-lvalue.cpp b/clang/test/CodeGenCXX/conditional-expr-lvalue.cpp
--- a/clang/test/CodeGenCXX/conditional-expr-lvalue.cpp
+++ b/clang/test/CodeGenCXX/conditional-expr-lvalue.cpp
@@ -1,8 +1,14 @@
-// RUN: %clang_cc1 -emit-llvm-only %s
-void f(bool flag) {
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - -fopenmp | FileCheck %s
+
+// CHECK-LABEL: define {{.*}} @{{.*}}simple_scalar_cond
+void simple_scalar_cond(bool flag) {
+  // CHECK: store i32 1, i32* %[[A:.*]],
   int a = 1;
+  // CHECK: store i32 2, i32* %[[B:.*]],
   int b = 2;

+  // CHECK: %[[LVAL:.*]] = phi i32* [ %[[A]], {{.*}} ], [ %[[B]], {{.*}} ]
+  // CHECK: store i32 3, i32* %[[LVAL]],
   (flag ? a : b) = 3;
 }

@@ -14,7 +20,173 @@
     A sub() const;
     void foo() const;
   };
+  // CHECK-LABEL: define {{.*}} @{{.*}}test0
   void foo(bool cond, const A &a) {
+    // CHECK: %[[TMP:.*]] = alloca %"struct.test0::A",
+    // CHECK: br i1
+    //
+    // CHECK: call void @_ZN5test01AC1ERKS0_(%"struct.test0::A"* %[[TMP]],
+    // CHECK: br label
+    //
+    // CHECK: call void @_ZNK5test01A3subEv(%"struct.test0::A"* sret align 1 %[[TMP]],
+    // CHECK: br label
+    //
+    // CHECK: call void @_ZNK5test01A3fooEv(%"struct.test0::A"* %[[TMP]])
     (cond ? a : a.sub()).foo();
   }
 }
+
+namespace bitfields {
+  struct A {
+    int m : 3;
+    int n : 5;
+  } a1, a2;
+
+  // CHECK-LABEL: define {{.*}} @{{.*}}load
+  int load(bool b) {
+    // CHECK: br i1
+    //
+    // CHECK: load {{.*}}@_ZN9bitfields2a1E, i32 0, i32 0
+    // CHECK: shl {{.*}}, 5
+    // CHECK: ashr {{.*}}, 5
+    // CHECK: br
+    //
+    // CHECK: load {{.*}}@_ZN9bitfields2a2E, i32 0, i32 0
+    // CHECK: ashr {{.*}}, 3
+    // CHECK: br
+    return b ? a1.m : a2.n;
+  }
+
+  // CHECK-LABEL: define {{.*}} @{{.*}}store
+  void store(bool b, int k) {
+    // CHECK: br i1
+    // CHECK: %[[SEL:.*]] = phi i1
+    // CHECK: %[[A1:.*]] = phi i8* {{.*}} @_ZN9bitfields2a1E, {{.*}} [ undef,
+    // CHECK: %[[A2:.*]] = phi i8* {{.*}} @_ZN9bitfields2a2E, {{.*}} [ undef,
+    // CHECK: br i1 %[[SEL]]
+    //
+    // CHECK: load {{.*}}%[[A1]]
+    // CHECK: and {{.*}}, 7
+    // CHECK: and {{.*}}, -8
+    // CHECK: or
+    // CHECK: store
+    // CHECK: br label
+    //
+    // CHECK: load {{.*}}%[[A2]]
+    // CHECK: and {{.*}}, 31
+    // CHECK: shl {{.*}}, 3
+    // CHECK: and {{.*}}, 7
+    // CHECK: or
+    // CHECK: store
+    // CHECK: br label
+    (b ? a1.m : a2.n) = k;
+  }
+
+  // CHECK-LABEL: define {{.*}} @{{.*}}update
+  void update(bool b, int k) {
+    // 1: Branch on condition and evaluate the chosen lvalue.
+    //
+    // CHECK: br i1
+    // CHECK: %[[SEL:.*]] = phi i1
+    // CHECK: %[[A1:.*]] = phi i8* {{.*}} @_ZN9bitfields2a1E, {{.*}} [ undef,
+    // CHECK: %[[A2:.*]] = phi i8* {{.*}} @_ZN9bitfields2a2E, {{.*}} [ undef,
+    // CHECK: br i1 %[[SEL]]
+    //
+    // 2: Branch on condition and load the relevant value.
+    //
+    // CHECK: load {{.*}}%[[A1]]
+    // CHECK: shl {{.*}}, 5
+    // CHECK: ashr {{.*}}, 5
+    // CHECK: br
+    //
+    // CHECK: load {{.*}}%[[A2]]
+    // CHECK: ashr {{.*}}, 3
+    // CHECK: br
+    //
+    // 3: Add k.
+    //
+    // CHECK: add nsw
+    // CHECK: br i1
+    //
+    // 4: Branch on condition and store the new value.
+    //
+    // CHECK: load {{.*}}%[[A1]]
+    // CHECK: and {{.*}}, 7
+    // CHECK: and {{.*}}, -8
+    // CHECK: or
+    // CHECK: store
+    // CHECK: br label
+    //
+    // CHECK: load {{.*}}%[[A2]]
+    // CHECK: and {{.*}}, 31
+    // CHECK: shl {{.*}}, 3
+    // CHECK: and {{.*}}, 7
+    // CHECK: or
+    // CHECK: store
+    // CHECK: br label
+    (b ? a1.m : a2.n) += k;
+  }
+
+  // CHECK-LABEL: define {{.*}} @{{.*}}atomic_load
+  int atomic_load(bool b) {
+    // CHECK: br i1
+    // CHECK: %[[SEL:.*]] = phi i1
+    // CHECK: %[[A1:.*]] = phi i8* {{.*}} @_ZN9bitfields2a1E, {{.*}} [ undef,
+    // CHECK: %[[A2:.*]] = phi i8* {{.*}} @_ZN9bitfields2a2E, {{.*}} [ undef,
+    // CHECK: br i1 %[[SEL]]
+    //
+    // CHECK: %[[A1b:.*]] = getelementptr i8, i8* %[[A1]], i64 0
+    // CHECK: %[[A1c:.*]] = bitcast i8* %[[A1b]] to i32*
+    // CHECK: load atomic {{.*}}%[[A1c]]
+    // CHECK: shl i32 {{.*}}, 29
+    // CHECK: ashr i32 {{.*}}, 29
+    // CHECK: br label
+    //
+    // CHECK: %[[A2b:.*]] = getelementptr i8, i8* %[[A2]], i64 0
+    // CHECK: %[[A2c:.*]] = bitcast i8* %[[A2b]] to i32*
+    // CHECK: load atomic {{.*}}%[[A2c]]
+    // CHECK: shl i32 {{.*}}, 24
+    // CHECK: ashr i32 {{.*}}, 27
+    // CHECK: br label
+    //
+    // CHECK: call void @__kmpc_flush
+    int v;
+    #pragma omp atomic seq_cst read
+    v = (b ? a1.m : a2.n);
+    return v;
+  }
+
+  // CHECK-LABEL: define {{.*}} @{{.*}}atomic_store
+  void atomic_store(bool b, int k) {
+    // CHECK: br i1
+    // CHECK: %[[SEL:.*]] = phi i1
+    // CHECK: %[[A1:.*]] = phi i8* {{.*}} @_ZN9bitfields2a1E, {{.*}} [ undef,
+    // CHECK: %[[A2:.*]] = phi i8* {{.*}} @_ZN9bitfields2a2E, {{.*}} [ undef,
+    // CHECK: br i1 %[[SEL]]
+    //
+    // CHECK: %[[A1b:.*]] = getelementptr i8, i8* %[[A1]], i64 0
+    // CHECK: %[[A1c:.*]] = bitcast i8* %[[A1b]] to i32*
+    // CHECK: load atomic {{.*}}%[[A1c]]
+    // CHECK: and i32 {{.*}}, 7
+    // CHECK: and i32 {{.*}}, -8
+    // CHECK: or
+    // CHECK: cmpxchg
+    // CHECK: br i1
+    // CHECK: br label
+    //
+    // CHECK: %[[A2b:.*]] = getelementptr i8, i8* %[[A2]], i64 0
+    // CHECK: %[[A2c:.*]] = bitcast i8* %[[A2b]] to i32*
+    // CHECK: load atomic {{.*}}%[[A2c]]
+    // CHECK: and i32 {{.*}}, 31
+    // CHECK: shl i32 {{.*}}, 3
+    // CHECK: and i32 {{.*}}, -249
+    // CHECK: or
+    // CHECK: cmpxchg
+    // CHECK: br i1
+    // CHECK: br label
+    //
+    // CHECK: call void @__kmpc_flush
+    #pragma omp atomic seq_cst write
+    (b ? a1.m : a2.n) = k;
+  }
+}