diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -799,6 +799,160 @@
   ///
   Value *getOMPCriticalRegionLock(StringRef CriticalName);
 
+  /// Callback type for atomic expression update.
+  /// For example:
+  ///   unsigned x = 0;
+  ///   #pragma omp atomic update
+  ///   x = Expr(x_old);  // where Expr() is any legal operation on x
+  ///
+  /// \param XOld The old value of the atomic memory location to use for the
+  ///             update.
+  /// \param IRB  Reference to the IRBuilder to use.
+  ///
+  /// \returns Value to update X to.
+  using AtomicUpdateCallbackTy =
+      const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
+
+private:
+  enum AtomicKind { Read, Write, Update, Capture };
+
+  /// Determine whether to emit a flush or not.
+  ///
+  /// \param Loc The insert and source location description.
+  /// \param AO  The required atomic ordering.
+  /// \param AK  The OpenMP atomic operation kind used.
+  ///
+  /// \returns Whether a flush was emitted or not.
+  bool CheckAndEmitFlushAfterAtomic(const LocationDescription &Loc,
+                                    AtomicOrdering AO, AtomicKind AK);
+
+  /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
+  /// For complex operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X).
+  /// Only scalar data types.
+  ///
+  /// \param AllocIP Instruction to create AllocaInst before.
+  /// \param X The target atomic pointer to be updated.
+  /// \param Expr The value to update X with.
+  /// \param AO Atomic ordering of the generated atomic instructions.
+  /// \param RMWOp The binary operation used for the update. If the operation
+  ///              is not supported by atomicrmw, or belongs to
+  ///              {FADD, FSUB, BAD_BINOP}, then a cmpxchg-based atomic will
+  ///              be generated.
+  /// \param UpdateOp Code generator for complex expressions that cannot be
+  ///                 expressed through an atomicrmw instruction.
+  /// \param VolatileX true if \a X is volatile.
+  /// \param IsXLHSInRHSPart true if \a X is the left-hand side in the
+  ///                        right-hand side part of the update expression,
+  ///                        false otherwise (e.g. true for X = X BinOp Expr).
+  ///
+  /// \returns A pair of the old value of X before the update, and the value
+  ///          used for the update.
+  std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
+                                               Value *Expr, AtomicOrdering AO,
+                                               AtomicRMWInst::BinOp RMWOp,
+                                               AtomicUpdateCallbackTy &UpdateOp,
+                                               bool VolatileX,
+                                               bool IsXLHSInRHSPart);
+
+  /// Emit the binary operation described by \p RMWOp, using \p Src1 and
+  /// \p Src2.
+  ///
+  /// \returns The emitted instruction.
+  Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
+                                AtomicRMWInst::BinOp RMWOp);
+
+public:
+  /// A struct to pack the relevant information while generating atomic Ops.
+  struct AtomicOpValue {
+    Value *Var = nullptr;
+    bool isSigned = false;
+    bool isVolatile = false;
+  };
+
+  /// Emit atomic read for: V = X --- Only scalar data types.
+  ///
+  /// \param Loc The insert and source location description.
+  /// \param X   The target pointer to be atomically read.
+  /// \param V   Memory address where to store the atomically read value.
+  /// \param AO  Atomic ordering of the generated atomic instructions.
+  ///
+  /// \returns Insertion point after the generated atomic read IR.
+  InsertPointTy createAtomicRead(const LocationDescription &Loc,
+                                 AtomicOpValue &X, AtomicOpValue &V,
+                                 AtomicOrdering AO);
+
+  /// Emit atomic write for: X = Expr --- Only scalar data types.
+  ///
+  /// \param Loc The insert and source location description.
+  /// \param X    The target pointer to be atomically written to.
+  /// \param Expr The value to store.
+  /// \param AO   Atomic ordering of the generated atomic instructions.
+  ///
+  /// \returns Insertion point after the generated atomic write IR.
+  InsertPointTy createAtomicWrite(const LocationDescription &Loc,
+                                  AtomicOpValue &X, Value *Expr,
+                                  AtomicOrdering AO);
+
+  /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
+  /// For complex operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X).
+  /// Only scalar data types.
+  ///
+  /// \param Loc The insert and source location description.
+  /// \param AllocIP Instruction to create AllocaInst before.
+  /// \param X The target atomic pointer to be updated.
+  /// \param Expr The value to update X with.
+  /// \param AO Atomic ordering of the generated atomic instructions.
+  /// \param RMWOp The binary operation used for the update. If the operation
+  ///              is not supported by atomicrmw, or belongs to
+  ///              {FADD, FSUB, BAD_BINOP}, then a cmpxchg-based atomic will
+  ///              be generated.
+  /// \param UpdateOp Code generator for complex expressions that cannot be
+  ///                 expressed through an atomicrmw instruction.
+  /// \param IsXLHSInRHSPart true if \a X is the left-hand side in the
+  ///                        right-hand side part of the update expression,
+  ///                        false otherwise (e.g. true for X = X BinOp Expr).
+  ///
+  /// \returns Insertion point after the generated atomic update IR.
+  InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
+                                   Instruction *AllocIP, AtomicOpValue &X,
+                                   Value *Expr, AtomicOrdering AO,
+                                   AtomicRMWInst::BinOp RMWOp,
+                                   AtomicUpdateCallbackTy &UpdateOp,
+                                   bool IsXLHSInRHSPart);
+
+  /// Emit atomic capture for constructs --- Only scalar data types:
+  ///   V = X; X = X BinOp Expr,
+  ///   X = X BinOp Expr; V = X,
+  ///   V = X; X = Expr BinOp X,
+  ///   X = Expr BinOp X; V = X,
+  ///   V = X; X = UpdateOp(X),
+  ///   X = UpdateOp(X); V = X.
+  ///
+  /// \param Loc The insert and source location description.
+  /// \param AllocIP Instruction to create AllocaInst before.
+  /// \param X The target atomic pointer to be updated.
+  /// \param V Memory address where to store the captured value.
+  /// \param Expr The value to update X with.
+  /// \param AO Atomic ordering of the generated atomic instructions.
+  /// \param RMWOp The binary operation used for the update. If the operation
+  ///              is not supported by atomicrmw, or belongs to
+  ///              {FADD, FSUB, BAD_BINOP}, then a cmpxchg-based atomic will
+  ///              be generated.
+  /// \param UpdateOp Code generator for complex expressions that cannot be
+  ///                 expressed through an atomicrmw instruction.
+  /// \param UpdateExpr true if X is an in-place update of the form
+  ///                   X = X BinOp Expr or X = Expr BinOp X, false otherwise.
+  /// \param IsXLHSInRHSPart true if X is the left-hand side in the right-hand
+  ///                        side part of the update expression, false
+  ///                        otherwise (e.g. true for X = X BinOp Expr).
+  /// \param IsPostfixUpdate true if the original value of 'x' must be stored
+  ///                        in 'v', not the updated one.
+  ///
+  /// \returns Insertion point after the generated atomic capture IR.
+  InsertPointTy
+  createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
+                      AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
+                      AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
+                      AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
+                      bool IsPostfixUpdate, bool IsXLHSInRHSPart);
+
   /// Create the control flow structure of a canonical OpenMP loop.
   ///
   /// The emitted loop will be disconnected, i.e. no edge to the loop's
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -1919,6 +1919,309 @@
   return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name);
 }
 
+bool OpenMPIRBuilder::CheckAndEmitFlushAfterAtomic(
+    const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) {
+
+  if (AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered)
+    llvm_unreachable("Unexpected ordering.");
+
+  bool Flush = false;
+  // The flush runtime call does not take a memory ordering yet. Until it
+  // does, this only resolves whether a flush is required for the given
+  // ordering and issues the plain flush call.
+  // TODO: once the runtime call accepts an ordering, uncomment this and all
+  // relevant lines below.
+  // llvm::AtomicOrdering FlushAO;
+
+  if (AK == Read) {
+    if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease ||
+        AO == AtomicOrdering::SequentiallyConsistent) {
+      // FlushAO = AtomicOrdering::Acquire;
+      Flush = true;
+    }
+  } else if ((AK == Write || AK == Update)) {
+    if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease ||
+        AO == AtomicOrdering::SequentiallyConsistent) {
+      // FlushAO = AtomicOrdering::Release;
+      Flush = true;
+    }
+  } else if (AK == Capture) {
+    if (AO == AtomicOrdering::Acquire) {
+      // Acquire ordering
+      Flush = true;
+    } else if (AO == AtomicOrdering::Release) {
+      // Release ordering
+      Flush = true;
+    } else if (AO == AtomicOrdering::AcquireRelease ||
+               AO == AtomicOrdering::SequentiallyConsistent) {
+      // AcquireRelease ordering
+      Flush = true;
+    }
+  }
+
+  if (Flush)
+    emitFlush(Loc);
+  // AtomicOrdering::Monotonic and all other combinations: do nothing.
+  return Flush;
+}
+
+OpenMPIRBuilder::InsertPointTy
+OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
+                                  AtomicOpValue &X, AtomicOpValue &V,
+                                  AtomicOrdering AO) {
+
+  if (!updateToLocation(Loc))
+    return Loc.IP;
+
+  auto XTy = X.Var->getType();
+  assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
+  auto XElemTy = XTy->getPointerElementType();
+  assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
+          XElemTy->isPointerTy()) &&
+         "OMP atomic read expected a scalar type");
+
+  Value *XRead = nullptr;
+
+  if (XElemTy->isIntegerTy()) {
+    LoadInst *XLD =
+        Builder.CreateLoad(XElemTy, X.Var, X.isVolatile, "omp.atomic.read");
+    XLD->setAtomic(AO);
+    XRead = cast<Value>(XLD);
+  } else {
+    // We need to bitcast and perform the atomic op as an integer.
+    unsigned addrspace = cast<PointerType>(XTy)->getAddressSpace();
+    IntegerType *IntCastTy =
+        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
+    auto XBCast =
+        Builder.CreateBitCast(X.Var, IntCastTy->getPointerTo(addrspace));
+    LoadInst *XLoad =
+        Builder.CreateLoad(IntCastTy, XBCast, X.isVolatile, "OMP-atomic-load");
+    XLoad->setAtomic(AO);
+    if (XElemTy->isFloatingPointTy()) {
+      XRead = Builder.CreateBitCast(XLoad, XElemTy, "Atomic-FltCast");
+    } else {
+      XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "Atomic-PtrCast");
+    }
+  }
+  CheckAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read);
+  Builder.CreateStore(XRead, V.Var, V.isVolatile);
+  return Builder.saveIP();
+}
+
+OpenMPIRBuilder::InsertPointTy
+OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
+                                   AtomicOpValue &X, Value *Expr,
+                                   AtomicOrdering AO) {
+
+  if (!updateToLocation(Loc))
+    return Loc.IP;
+
+  auto XTy = X.Var->getType();
+  assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
+  auto XElemTy = XTy->getPointerElementType();
+  assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
+          XElemTy->isPointerTy()) &&
+         "OMP atomic write expected a scalar type");
+
+  if (XElemTy->isIntegerTy()) {
+    StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.isVolatile);
+    XSt->setAtomic(AO);
+  } else {
+    // We need to bitcast and perform the atomic op as an integer.
+    unsigned addrspace = cast<PointerType>(XTy)->getAddressSpace();
+    IntegerType *IntCastTy =
+        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
+    auto XBCast =
+        Builder.CreateBitCast(X.Var, IntCastTy->getPointerTo(addrspace));
+    StoreInst *XSt = Builder.CreateStore(Expr, XBCast, X.isVolatile);
+    XSt->setAtomic(AO);
+  }
+
+  CheckAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write);
+  return Builder.saveIP();
+}
+
+OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate(
+    const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
+    Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
+    AtomicUpdateCallbackTy &UpdateOp, bool IsXLHSInRHSPart) {
+  if (!updateToLocation(Loc))
+    return Loc.IP;
+
+  auto XTy = X.Var->getType();
+  assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
+  auto XElemTy = XTy->getPointerElementType();
+  assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
+          XElemTy->isPointerTy()) &&
+         "OMP atomic update expected a scalar type");
+  assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
+         "OpenMP atomic does not support LT or GT operations");
+
+  emitAtomicUpdate(AllocIP, X.Var, Expr, AO, RMWOp, UpdateOp, X.isVolatile,
+                   IsXLHSInRHSPart);
+  CheckAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
+  return Builder.saveIP();
+}
+
+Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
+                                               AtomicRMWInst::BinOp RMWOp) {
+  switch (RMWOp) {
+  case AtomicRMWInst::Add:
+    return Builder.CreateAdd(Src1, Src2);
+  case AtomicRMWInst::Sub:
+    return Builder.CreateSub(Src1, Src2);
+  case AtomicRMWInst::And:
+    return Builder.CreateAnd(Src1, Src2);
+  case AtomicRMWInst::Nand:
+    return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2));
+  case AtomicRMWInst::Or:
+    return Builder.CreateOr(Src1, Src2);
+  case AtomicRMWInst::Xor:
+    return Builder.CreateXor(Src1, Src2);
+  case AtomicRMWInst::Xchg:
+  case AtomicRMWInst::FAdd:
+  case AtomicRMWInst::FSub:
+  case AtomicRMWInst::BAD_BINOP:
+  case AtomicRMWInst::Max:
+  case AtomicRMWInst::Min:
+  default:
+    llvm_unreachable("Unsupported atomic update operation");
+  }
+  return nullptr;
+}
+
+std::pair<Value *, Value *>
+OpenMPIRBuilder::emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr,
+                                  AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
+                                  AtomicUpdateCallbackTy &UpdateOp,
+                                  bool VolatileX, bool IsXLHSInRHSPart) {
+
+  auto XElemTy = X->getType()->getPointerElementType();
+
+  bool doCmpExch =
+      ((RMWOp == AtomicRMWInst::BAD_BINOP) || (RMWOp == AtomicRMWInst::FAdd)) ||
+      (RMWOp == AtomicRMWInst::FSub) ||
+      (RMWOp == AtomicRMWInst::Sub && !IsXLHSInRHSPart);
+
+  std::pair<Value *, Value *> Res;
+  if (XElemTy->isIntegerTy() && !doCmpExch) {
+    Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
+    // Not needed except in case of postfix captures. Generate anyway for
+    // consistency with the else part. Will be removed with any DCE pass.
+    Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp);
+  } else {
+    unsigned addrspace = cast<PointerType>(X->getType())->getAddressSpace();
+    IntegerType *IntCastTy =
+        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
+    auto XBCast = Builder.CreateBitCast(X, IntCastTy->getPointerTo(addrspace));
+    auto *OldVal =
+        Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load");
+    OldVal->setAtomic(AO);
+    /// CurBB
+    /// |     /---\
+    /// ContBB    |
+    /// |     \---/
+    /// ExitBB
+    auto *CurBB = Builder.GetInsertBlock();
+    auto CurBBTI = CurBB->getTerminator();
+    CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable();
+    auto *ExitBB =
+        CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit");
+    auto *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(),
+                                          X->getName() + ".atomic.cont");
+    ContBB->getTerminator()->eraseFromParent();
+    Builder.SetInsertPoint(ContBB);
+    llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2);
+    PHI->addIncoming(OldVal, CurBB);
+    auto NewAtomicAddr = Builder.CreateAlloca(XElemTy);
+    NewAtomicAddr->setName(X->getName() + "X.Temp");
+    NewAtomicAddr->moveBefore(AllocIP);
+    IntegerType *NewAtomicCastTy =
+        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
+    bool isIntTy = XElemTy->isIntegerTy();
+    auto NewAtomicIntAddr =
+        (isIntTy)
+            ? NewAtomicAddr
+            : Builder.CreateBitCast(NewAtomicAddr,
+                                    NewAtomicCastTy->getPointerTo(addrspace));
+    Value *OldExprVal = PHI;
+    if (!isIntTy) {
+      if (XElemTy->isFloatingPointTy()) {
+        OldExprVal = Builder.CreateBitCast(PHI, XElemTy,
+                                           X->getName() + ".Atomic.FltCast");
+      } else {
+        OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy,
+                                            X->getName() + ".Atomic.PtrCast");
+      }
+    }
+    Value *upd = UpdateOp(OldExprVal, Builder);
+    Builder.CreateStore(upd, NewAtomicAddr);
+    auto *DesiredVal = Builder.CreateLoad(XElemTy, NewAtomicIntAddr);
+    auto XAddr =
+        (isIntTy)
+            ? X
+            : Builder.CreateBitCast(X, IntCastTy->getPointerTo(addrspace));
+    auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+    auto Result = Builder.CreateAtomicCmpXchg(XAddr, OldExprVal, DesiredVal,
+                                              llvm::MaybeAlign(), AO, Failure);
+    Result->setVolatile(VolatileX);
+    auto *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0);
+    auto *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1);
+    PHI->addIncoming(PreviousVal, Builder.GetInsertBlock());
+    Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB);
+
+    Res.first = OldExprVal;
+    Res.second = upd;
+
+    // Set the insertion point in the exit block.
+    if (UnreachableInst *ExitTI =
+            dyn_cast<UnreachableInst>(ExitBB->getTerminator())) {
+      CurBBTI->eraseFromParent();
+      Builder.SetInsertPoint(ExitBB);
+    } else {
+      Builder.SetInsertPoint(ExitTI);
+    }
+  }
+
+  return Res;
+}
+
+OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
+    const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
+    AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
+    AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
+    bool UpdateExpr, bool IsPostfixUpdate, bool IsXLHSInRHSPart) {
+  if (!updateToLocation(Loc))
+    return Loc.IP;
+
+  auto XTy = X.Var->getType();
+  assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
+  auto XElemTy = XTy->getPointerElementType();
+  assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
+          XElemTy->isPointerTy()) &&
+         "OMP atomic capture expected a scalar type");
+  assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
+         "OpenMP atomic does not support LT or GT operations");
+
+  // If UpdateExpr is true, 'x' is updated in place with some expression.
+  // Otherwise 'x' is simply rewritten with 'expr', i.e. an exchange.
+  auto AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
+  auto Result = emitAtomicUpdate(AllocIP, X.Var, Expr, AO, AtomicOp, UpdateOp,
+                                 X.isVolatile, IsXLHSInRHSPart);
+
+  Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
+  Builder.CreateStore(CapturedVal, V.Var, V.isVolatile);
+
+  CheckAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
+  return Builder.saveIP();
+}
+
 // Create all simple and struct types exposed by the runtime and remember
 // the llvm::PointerTypes of them for easy access later.
 void OpenMPIRBuilder::initializeTypes(Module &M) {
diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -1984,4 +1984,194 @@
   EXPECT_EQ(SingleEndCI->getArgOperand(1), SingleEntryCI->getArgOperand(1));
 }
 
+TEST_F(OpenMPIRBuilderTest, OMPAtomicRead) {
+  OpenMPIRBuilder OMPBuilder(*M);
+  OMPBuilder.initialize();
+  F->setName("func");
+  IRBuilder<> Builder(BB);
+
+  OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+  IntegerType *Int32 = Type::getInt32Ty(M->getContext());
+  AllocaInst *XVal = Builder.CreateAlloca(Int32);
+  XVal->setName("AtomicVar");
+  AllocaInst *VVal = Builder.CreateAlloca(Int32);
+  VVal->setName("AtomicRead");
+  AtomicOrdering AO = AtomicOrdering::Monotonic;
+  OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+  OpenMPIRBuilder::AtomicOpValue V = {VVal, false, false};
+
+  BasicBlock *EntryBB = BB;
+
+  OMPBuilder.createAtomicRead(Loc, X, V, AO);
+  LoadInst *AtomicLoad = nullptr;
+  StoreInst *StoreofAtomic = nullptr;
+
+  for (auto &FI : *EntryBB) {
+    Instruction *cur = &FI;
+    if (isa<LoadInst>(cur)) {
+      AtomicLoad = cast<LoadInst>(cur);
+      if (AtomicLoad->getPointerOperand() == XVal)
+        continue;
+      AtomicLoad = nullptr;
+    } else if (isa<StoreInst>(cur)) {
+      StoreofAtomic = cast<StoreInst>(cur);
+      if (StoreofAtomic->getPointerOperand() == VVal)
+        continue;
+      StoreofAtomic = nullptr;
+    }
+  }
+
+  EXPECT_NE(AtomicLoad, nullptr);
+  EXPECT_TRUE(AtomicLoad->isAtomic());
+
+  EXPECT_NE(StoreofAtomic, nullptr);
+  EXPECT_EQ(StoreofAtomic->getValueOperand(), AtomicLoad);
+}
+
+TEST_F(OpenMPIRBuilderTest, OMPAtomicWrite) {
+  OpenMPIRBuilder OMPBuilder(*M);
+  OMPBuilder.initialize();
+  F->setName("func");
+  IRBuilder<> Builder(BB);
+
+  OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+  LLVMContext &Ctx = M->getContext();
+  IntegerType *Int32 = Type::getInt32Ty(Ctx);
+  AllocaInst *XVal = Builder.CreateAlloca(Int32);
+  XVal->setName("AtomicVar");
+  OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+  AtomicOrdering AO = AtomicOrdering::Monotonic;
+  ConstantInt *ValToWrite = ConstantInt::get(Type::getInt32Ty(Ctx), 1U);
+
+  BasicBlock *EntryBB = BB;
+
+  OMPBuilder.createAtomicWrite(Loc, X, ValToWrite, AO);
+
+  StoreInst *StoreofAtomic = nullptr;
+
+  for (auto &FI : *EntryBB) {
+    Instruction *cur = &FI;
+    if (isa<StoreInst>(cur)) {
+      StoreofAtomic = cast<StoreInst>(cur);
+      if (StoreofAtomic->getPointerOperand() == XVal)
+        continue;
+      StoreofAtomic = nullptr;
+    }
+  }
+
+  EXPECT_NE(StoreofAtomic, nullptr);
+  EXPECT_TRUE(StoreofAtomic->isAtomic());
+  EXPECT_EQ(StoreofAtomic->getValueOperand(), ValToWrite);
+}
+
+TEST_F(OpenMPIRBuilderTest, OMPAtomicUpdate) {
+  OpenMPIRBuilder OMPBuilder(*M);
+  OMPBuilder.initialize();
+  F->setName("func");
+  IRBuilder<> Builder(BB);
+
+  OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+  IntegerType *Int32 = Type::getInt32Ty(M->getContext());
+  AllocaInst *XVal = Builder.CreateAlloca(Int32);
+  XVal->setName("AtomicVar");
+  Builder.CreateStore(ConstantInt::get(Type::getInt32Ty(Ctx), 0U), XVal);
+  OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+  AtomicOrdering AO = AtomicOrdering::Monotonic;
+  ConstantInt *ConstVal = ConstantInt::get(Type::getInt32Ty(Ctx), 1U);
+  Value *Expr = nullptr;
+  AtomicRMWInst::BinOp RMWOp = AtomicRMWInst::Sub;
+  bool IsXLHSInRHSPart = false;
+
+  BasicBlock *EntryBB = BB;
+  Instruction *AllocIP = EntryBB->getFirstNonPHI();
+  Value *Sub = nullptr;
+
+  auto UpdateOp = [&](Value *atomic, IRBuilder<> &IRB) {
+    Sub = IRB.CreateSub(ConstVal, atomic);
+    return Sub;
+  };
+
+  OMPBuilder.createAtomicUpdate(Builder, AllocIP, X, Expr, AO, RMWOp, UpdateOp,
+                                IsXLHSInRHSPart);
+  BasicBlock *contBB = EntryBB->getSingleSuccessor();
+  BranchInst *contTI = dyn_cast<BranchInst>(contBB->getTerminator());
+  EXPECT_NE(contTI, nullptr);
+  BasicBlock *EndBB = contTI->getSuccessor(0);
+  EXPECT_TRUE(contTI->isConditional());
+  EXPECT_EQ(contTI->getSuccessor(1), contBB);
+
+  PHINode *phi = dyn_cast<PHINode>(&contBB->front());
+  EXPECT_NE(phi, nullptr);
+  EXPECT_EQ(phi->getNumIncomingValues(), 2U);
+  EXPECT_EQ(phi->getIncomingBlock(0), EntryBB);
+  EXPECT_EQ(phi->getIncomingBlock(1), contBB);
+
+  EXPECT_EQ(Sub->getNumUses(), 1U);
+  StoreInst *St = dyn_cast<StoreInst>(Sub->user_back());
+  AllocaInst *UpdateTemp = dyn_cast<AllocaInst>(St->getPointerOperand());
+
+  ExtractValueInst *ExVI1 =
+      dyn_cast<ExtractValueInst>(phi->getIncomingValueForBlock(contBB));
+  EXPECT_NE(ExVI1, nullptr);
+  AtomicCmpXchgInst *CmpExchg =
+      dyn_cast<AtomicCmpXchgInst>(ExVI1->getAggregateOperand());
+  EXPECT_NE(CmpExchg, nullptr);
+  EXPECT_EQ(CmpExchg->getPointerOperand(), XVal);
+  EXPECT_EQ(CmpExchg->getCompareOperand(), phi);
+  EXPECT_EQ(CmpExchg->getSuccessOrdering(), AtomicOrdering::Monotonic);
+
+  LoadInst *Ld = dyn_cast<LoadInst>(CmpExchg->getNewValOperand());
+  EXPECT_NE(Ld, nullptr);
+  EXPECT_EQ(UpdateTemp, Ld->getPointerOperand());
+}
+
+TEST_F(OpenMPIRBuilderTest, OMPAtomicCapture) {
+  OpenMPIRBuilder OMPBuilder(*M);
+  OMPBuilder.initialize();
+  F->setName("func");
+  IRBuilder<> Builder(BB);
+
+  OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+  LLVMContext &Ctx = M->getContext();
+  IntegerType *Int32 = Type::getInt32Ty(Ctx);
+  AllocaInst *XVal = Builder.CreateAlloca(Int32);
+  XVal->setName("AtomicVar");
+  AllocaInst *VVal = Builder.CreateAlloca(Int32);
+  VVal->setName("AtomicCapTar");
+  StoreInst *Init =
+      Builder.CreateStore(ConstantInt::get(Type::getInt32Ty(Ctx), 0U), XVal);
+
+  OpenMPIRBuilder::AtomicOpValue X = {XVal, false, false};
+  OpenMPIRBuilder::AtomicOpValue V = {VVal, false, false};
+  AtomicOrdering AO = AtomicOrdering::Monotonic;
+  ConstantInt *Expr = ConstantInt::get(Type::getInt32Ty(Ctx), 1U);
+  AtomicRMWInst::BinOp RMWOp = AtomicRMWInst::Add;
+  bool IsXLHSInRHSPart = true;
+  bool IsPostfixUpdate = true;
+  bool UpdateExpr = true;
+
+  BasicBlock *EntryBB = BB;
+  Instruction *AllocIP = EntryBB->getFirstNonPHI();
+
+  // Integer update - callback not used.
+  auto UpdateOp = [&](Value *atomic, IRBuilder<> &IRB) { return nullptr; };
+
+  OMPBuilder.createAtomicCapture(Builder, AllocIP, X, V, Expr, AO, RMWOp,
+                                 UpdateOp, UpdateExpr, IsPostfixUpdate,
+                                 IsXLHSInRHSPart);
+  EXPECT_EQ(EntryBB->getParent()->size(), 1U);
+  AtomicRMWInst *ARWM = dyn_cast<AtomicRMWInst>(Init->getNextNode());
+  EXPECT_NE(ARWM, nullptr);
+  EXPECT_EQ(ARWM->getPointerOperand(), XVal);
+  EXPECT_EQ(ARWM->getOperation(), RMWOp);
+  StoreInst *st = dyn_cast<StoreInst>(ARWM->user_back());
+  EXPECT_NE(st, nullptr);
+  EXPECT_EQ(st->getPointerOperand(), VVal);
+}
+
 } // namespace
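For context, the sketch below shows how a frontend might drive the new createAtomicUpdate API for an update expression that atomicrmw cannot express (here, hypothetically, x = 2 * x + 1), which takes the cmpxchg-loop path of emitAtomicUpdate. This is an illustrative sketch, not part of the patch: XVal and AllocIP stand for the pointer to 'x' and the alloca insertion point the caller already has, mirroring the unit tests above.

  // Illustrative sketch (not part of the patch); XVal and AllocIP are assumed
  // to be set up by the caller, as in the tests above.
  OpenMPIRBuilder::AtomicOpValue X = {XVal, /*isSigned=*/false,
                                      /*isVolatile=*/false};
  auto UpdateOp = [&](Value *XOld, IRBuilder<> &IRB) {
    // XOld is the value read from 'x'; return the value to store back.
    Value *Doubled = IRB.CreateMul(XOld, IRB.getInt32(2));
    return IRB.CreateAdd(Doubled, IRB.getInt32(1));
  };
  // BAD_BINOP requests the cmpxchg-based lowering; Expr is unused on that path.
  Builder.restoreIP(OMPBuilder.createAtomicUpdate(
      Builder, AllocIP, X, /*Expr=*/nullptr, AtomicOrdering::Monotonic,
      AtomicRMWInst::BAD_BINOP, UpdateOp, /*IsXLHSInRHSPart=*/false));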