Index: include/polly/CodeGen/BlockGenerators.h =================================================================== --- include/polly/CodeGen/BlockGenerators.h +++ include/polly/CodeGen/BlockGenerators.h @@ -370,8 +370,8 @@ /// (for values recalculated in the new ScoP, but not /// within this basic block) /// @param BBMap A mapping from old values to their new values in this block. - virtual void generateScalarStores(ScopStmt &Stmt, LoopToScevMapT <S, - ValueMapT &BBMap); + void generateScalarStores(ScopStmt &Stmt, LoopToScevMapT <S, + ValueMapT &BBMap); /// @brief Handle users of @p Inst outside the SCoP. /// @@ -492,8 +492,8 @@ /// subclasses to handle PHIs different. /// /// @returns The nullptr as the BlockGenerator does not copy PHIs. - virtual Value *copyPHIInstruction(ScopStmt &, PHINode *, ValueMapT &, - LoopToScevMapT &) { + virtual PHINode *copyPHIInstruction(ScopStmt &, PHINode *, ValueMapT &, + LoopToScevMapT &) { return nullptr; } @@ -754,20 +754,6 @@ void addOperandToPHI(ScopStmt &Stmt, const PHINode *PHI, PHINode *PHICopy, BasicBlock *IncomingBB, LoopToScevMapT <S); - /// @brief Generate the scalar stores for the given statement. - /// - /// After the statement @p Stmt was copied all inner-SCoP scalar dependences - /// starting in @p Stmt (hence all scalar write accesses in @p Stmt) need to - /// be demoted to memory. - /// - /// @param Stmt The statement we generate code for. - /// @param LTS A mapping from loops virtual canonical induction variable to - /// their new values (for values recalculated in the new ScoP, - /// but not within this basic block) - /// @param BBMap A mapping from old values to their new values in this block. - virtual void generateScalarStores(ScopStmt &Stmt, LoopToScevMapT <S, - ValueMapT &BBMAp) override; - /// @brief Copy a single PHI instruction. /// /// This copies a single PHI instruction and updates references to old values @@ -779,10 +765,10 @@ /// (for values recalculated within this basic block). 
/// @param LTS A map from old loops to new induction variables as SCEVs. /// - /// @returns The copied instruction or nullptr if no copy was made. - virtual Value *copyPHIInstruction(ScopStmt &Stmt, PHINode *Inst, - ValueMapT &BBMap, - LoopToScevMapT <S) override; + /// @returns The copied PHI. + virtual PHINode *copyPHIInstruction(ScopStmt &Stmt, PHINode *Inst, + ValueMapT &BBMap, + LoopToScevMapT <S) override; }; } #endif Index: include/polly/ScopInfo.h =================================================================== --- include/polly/ScopInfo.h +++ include/polly/ScopInfo.h @@ -275,8 +275,8 @@ /// | use float %V.reload1 | | use float %V.reload2 | /// ------------------------------------ ------------------------------------ /// - /// #AccessInst is either the llvm::Value for WRITEs or the value's user for - /// READS. The #BaseAddr is represented by the value's definition (i.e. the + /// #AccessInst is either the llvm::Value for WRITEs or nullptr for reads. + /// The #BaseAddr is represented by the value's definition (i.e. the /// llvm::Value itself) as no such alloca yet exists before CodeGeneration. /// #AccessValue is also the llvm::Value itself. /// @@ -759,6 +759,23 @@ /// @bried Implicit stores at the end of a statement. MemoryAccessVec TrailingWrites; + /// @brief The set of values defined elsewehere required in this ScopStmt and + /// their SCALAR READ MemoryAccesses. + DenseMap ScalarReads; + + /// @brief The set of values defined in this ScopStmt that are required + /// elsewehere and their SCALAR WRITE MemoryAccesses. + DenseMap ScalarWrites; + + /// @brief If this ScopStmt is an incoming block of a PHI node, its incoming + /// value needs to be written as a trailing write. + /// + /// Non-affine subregions can + /// have multiple exiting blocks that are incoming blocks of the PHI nodes. + /// This map ensures that there is only one write operation for the complete + /// subregion. A PHI selecting the relevant value will be inserted. 
+ DenseMap PHIWrites; + //@} /// @brief A SCoP statement represents either a basic block (affine/precise @@ -923,6 +940,27 @@ /// @brief Return the vector of trailing implicit stores. const MemoryAccessVec &getTrailingWrites() const { return TrailingWrites; } + /// @brief Return the MemoryAccess that reloads a value, or nullptr if not + /// (yet) added. + MemoryAccess *lookupScalarReadOf(Value *Inst) const { + return ScalarReads.lookup(Inst); + } + + /// @brief Return the MemoryAccess that writes the value of an instruction + /// defined in this block, or nullptr if not (yet) added. + MemoryAccess *lookupScalarWriteOf(Instruction *Inst) const { + assert((isRegionStmt() && R->contains(Inst)) || + (!isRegionStmt() && Inst->getParent() == BB)); + return ScalarWrites.lookup(Inst); + } + + /// @brief Return the PHI write MemoryAccess for the incoming values from any + /// basic block in this ScopStmt, or nullptr if not (yet) added. + MemoryAccess *lookupPHIWriteOf(PHINode *PHI) const { + assert(isBlockStmt() || R->getExit() == PHI->getParent()); + return PHIWrites.lookup(PHI); + } + void setBasicBlock(BasicBlock *Block) { // TODO: Handle the case where the statement is a region statement, thus // the entry block was split and needs to be changed in the region R. @@ -930,9 +968,21 @@ BB = Block; } - /// @brief Add @p Access to this statement's list of accesses. + /// @brief Add @p Access to this statement's list of explicit accesses. void addExplicitAccess(MemoryAccess *Access); + /// @brief Add @p Access to this statement's list of SCALAR READ accesses. + void addScalarRead(MemoryAccess *Access); + + /// @brief Add @p Access to this statement's list of SCALAR WRITE accesses. + void addScalarWrite(MemoryAccess *Access); + + /// @brief Add @p Access to this statement's list of PHI READ accesses. + void addPHIRead(MemoryAccess *Access); + + /// @brief Add @p Access to this statement's list of PHI WRITE accesses. 
+ void addPHIWrite(MemoryAccess *Access); + /// @brief Add @p Access to this statement's list of implicit loads. void addLeadingLoad(MemoryAccess *Access); @@ -1684,12 +1734,9 @@ /// /// @param Inst The instruction to be analyzed /// @param R The SCoP region - /// @param NonAffineSubRegion The non affine sub-region @p Inst is in. - /// - /// @return True if the Instruction is used in other BB and a scalar write - /// Access is required. - bool buildScalarDependences(Instruction *Inst, Region *R, - Region *NonAffineSubRegio); + void buildScalarDependences(Instruction *Inst); + + void buildEscapingDependences(Instruction *Inst); /// @brief Create MemoryAccesses for the given PHI node in the given region. /// @@ -1763,38 +1810,25 @@ ArrayRef Subscripts, ArrayRef Sizes, Value *AccessValue); - /// @brief Create a MemoryAccess for writing an llvm::Value. + /// @brief Ensure that there is a MemoryAccess that writes a value's + /// definition. /// - /// The access will be created at the @p Value's definition. + /// The access will be in the ScopStmt where @p Value is defined, if none + /// exists yet. /// /// @param Value The value to be written. /// @see addScalarReadAccess() /// @see AccessOrigin - void addScalarWriteAccess(Instruction *Value); + void ensureScalarStore(Instruction *Value); - /// @brief Create a MemoryAccess for reloading an llvm::Value. - /// - /// Use this overload only for non-PHI instructions. - /// - /// @param Value The scalar expected to be loaded. - /// @param User User of the scalar; this is where the access is added. - /// @see addScalarWriteAccess() - /// @see AccessOrigin - void addScalarReadAccess(Value *Value, Instruction *User); - - /// @brief Create a MemoryAccess for reloading an llvm::Value. - /// - /// This is for PHINodes using the scalar. As we model it, the used value must - /// be available at the incoming block instead of when hitting the - /// instruction. 
+ /// @brief Ensures that a @p Value will be reloaded in the ScopStmt that + /// contains UserBB. /// /// @param Value The scalar expected to be loaded. - /// @param User The PHI node referencing @p Value. - /// @param UserBB Incoming block for the incoming @p Value. - /// @see addPHIWriteAccess() + /// @param UserBB Where to reload the value. /// @see addScalarWriteAccess() /// @see AccessOrigin - void addScalarReadAccess(Value *Value, PHINode *User, BasicBlock *UserBB); + void ensureScalarReload(Value *Value, BasicBlock *UserBB); /// @brief Create a write MemoryAccess for the incoming block of a phi node. /// @@ -1809,8 +1843,8 @@ /// PHINode in the SCoP region's exit block. /// @see addPHIReadAccess() /// @see AccessOrigin - void addPHIWriteAccess(PHINode *PHI, BasicBlock *IncomingBlock, - Value *IncomingValue, bool IsExitBlock); + void ensurePHIWriteAccess(PHINode *PHI, BasicBlock *IncomingBlock, + Value *IncomingValue, bool IsExitBlock); /// @brief Create a MemoryAccess for reading the value of a phi. 
/// Index: lib/Analysis/ScopInfo.cpp =================================================================== --- lib/Analysis/ScopInfo.cpp +++ lib/Analysis/ScopInfo.cpp @@ -852,6 +852,45 @@ MemAccs.push_back(Access); } +void ScopStmt::addScalarRead(MemoryAccess *Access) { + assert(Access->isScalar() && Access->isRead()); + + Value *AccessVal = Access->getAccessValue(); + assert(!ScalarReads.lookup(AccessVal)); + + ScalarReads[AccessVal] = Access; + addLeadingLoad(Access); +} + +void ScopStmt::addScalarWrite(MemoryAccess *Access) { + assert(Access->isScalar() && Access->isWrite()); + + Instruction *AccessVal = cast(Access->getAccessValue()); + assert(Parent.getStmtForBasicBlock(AccessVal->getParent()) == this); + assert(!ScalarWrites.lookup(cast(AccessVal))); + + ScalarWrites[AccessVal] = Access; + addTrailingWrite(Access); +} + +void ScopStmt::addPHIRead(MemoryAccess *Access) { + assert(Access->isPHI() && Access->isRead()); + + addLeadingLoad(Access); +} + +void ScopStmt::addPHIWrite(MemoryAccess *Access) { + assert(Access->isImplicit() && Access->isWrite() && + "The origin must be either PHI of SCALAR (for escaping PHI values in " + "the exit block)"); + + PHINode *PHI = cast(Access->getBaseAddr()); + assert(!PHIWrites.lookup(PHI)); + + PHIWrites[PHI] = Access; + addTrailingWrite(Access); +} + void ScopStmt::realignParams() { for (MemoryAccess *MA : *this) MA->realignParams(); @@ -3170,55 +3209,37 @@ if (!IsExitBlock && canSynthesize(PHI, LI, SE, &R)) return; + // Do not build scalar dependences inside a non-affine subregion. + if (NonAffineSubRegion && PHI->getParent() != NonAffineSubRegion->getEntry()) + return; + // PHI nodes are modeled as if they had been demoted prior to the SCoP // detection. Hence, the PHI is a load of a new memory location in which the // incoming value was written at the end of the incoming basic block. 
- bool OnlyNonAffineSubRegionOperands = true; for (unsigned u = 0; u < PHI->getNumIncomingValues(); u++) { Value *Op = PHI->getIncomingValue(u); BasicBlock *OpBB = PHI->getIncomingBlock(u); - // Do not build scalar dependences inside a non-affine subregion. - if (NonAffineSubRegion && NonAffineSubRegion->contains(OpBB)) - continue; - - OnlyNonAffineSubRegionOperands = false; - - if (!R.contains(OpBB)) - continue; - - Instruction *OpI = dyn_cast(Op); - if (OpI) { - BasicBlock *OpIBB = OpI->getParent(); - // As we pretend there is a use (or more precise a write) of OpI in OpBB - // we have to insert a scalar dependence from the definition of OpI to - // OpBB if the definition is not in OpBB. - if (scop->getStmtForBasicBlock(OpIBB) != - scop->getStmtForBasicBlock(OpBB)) { - addScalarReadAccess(OpI, PHI, OpBB); - addScalarWriteAccess(OpI); - } - } else if (ModelReadOnlyScalars && !isa(Op)) { - addScalarReadAccess(Op, PHI, OpBB); - } - - addPHIWriteAccess(PHI, OpBB, Op, IsExitBlock); + ensurePHIWriteAccess(PHI, OpBB, Op, IsExitBlock); } - - if (!OnlyNonAffineSubRegionOperands && !IsExitBlock) { + if (!IsExitBlock) addPHIReadAccess(PHI); - } } -bool ScopInfo::buildScalarDependences(Instruction *Inst, Region *R, - Region *NonAffineSubRegion) { - bool canSynthesizeInst = canSynthesize(Inst, LI, SE, R); - if (isIgnoredIntrinsic(Inst)) - return false; +void ScopInfo::buildScalarDependences(Instruction *Inst) { + assert(!isa(Inst)); - bool AnyCrossStmtUse = false; - BasicBlock *ParentBB = Inst->getParent(); + // Pull-in required operands. + for (auto &Op : Inst->operands()) + ensureScalarReload(Op.get(), Inst->getParent()); +} + +void ScopInfo::buildEscapingDependences(Instruction *Inst) { + Region *R = &scop->getRegion(); + // Check for uses of this instruction outside the scop. Because we do not + // iterate over such instructions and therefore do not "ensured" the existence + // of a write, we must determine such use here. 
for (User *U : Inst->users()) { Instruction *UI = dyn_cast(U); @@ -3228,77 +3249,16 @@ BasicBlock *UseParent = UI->getParent(); - // Ignore the users in the same BB (statement) - if (UseParent == ParentBB) - continue; - - // Do not build scalar dependences inside a non-affine subregion. - if (NonAffineSubRegion && NonAffineSubRegion->contains(UseParent)) - continue; - - // Check for PHI nodes in the region exit and skip them, if they will be - // modeled as PHI nodes. - // - // PHI nodes in the region exit that have more than two incoming edges need - // to be modeled as PHI-Nodes to correctly model the fact that depending on - // the control flow a different value will be assigned to the PHI node. In - // case this is the case, there is no need to create an additional normal - // scalar dependence. Hence, bail out before we register an "out-of-region" - // use for this definition. - if (isa(UI) && UI->getParent() == R->getExit() && - !R->getExitingBlock()) - continue; - - // Check whether or not the use is in the SCoP. - // If there is single exiting block, the single incoming value exit for node - // PHIs are handled like any escaping SCALAR. Otherwise, as if the PHI - // belongs to the the scop region. - bool IsExitNodePHI = isa(UI) && UI->getParent() == R->getExit(); - if (!R->contains(UseParent) && (R->getExitingBlock() || !IsExitNodePHI)) { - AnyCrossStmtUse = true; - continue; - } - - // If the instruction can be synthesized and the user is in the region - // we do not need to add scalar dependences. - if (canSynthesizeInst) - continue; - - // No need to translate these scalar dependences into polyhedral form, - // because synthesizable scalars can be generated by the code generator. - if (canSynthesize(UI, LI, SE, R)) - continue; - - // Skip PHI nodes in the region as they handle their operands on their own. - if (isa(UI)) + // buildAccessFunctions iterates over exit node PHIs separately. 
+ if (isa(UI) && UseParent == R->getExit() && !R->getExitingBlock()) continue; - // Now U is used in another statement. - AnyCrossStmtUse = true; - - // Do not build a read access that is not in the current SCoP - // Use the def instruction as base address of the MemoryAccess, so that it - // will become the name of the scalar access in the polyhedral form. - addScalarReadAccess(Inst, UI); - } - - if (ModelReadOnlyScalars && !isa(Inst)) { - for (Value *Op : Inst->operands()) { - if (canSynthesize(Op, LI, SE, R)) - continue; - - if (Instruction *OpInst = dyn_cast(Op)) - if (R->contains(OpInst)) - continue; - - if (isa(Op)) - continue; - - addScalarReadAccess(Op, Inst); + if (!R->contains(UseParent)) { + // At least one escaping use found. + ensureScalarStore(Inst); + break; } } - - return AnyCrossStmtUse; } extern MapInsnToMemAcc InsnToMemAcc; @@ -3487,10 +3447,10 @@ if (ScopRIL.count(dyn_cast(Inst))) continue; - if (buildScalarDependences(Inst, &R, NonAffineSubRegion)) { - if (!isa(Inst)) - addScalarWriteAccess(Inst); - } + if (!PHI) + buildScalarDependences(Inst); + if (!IsExitBlock) + buildEscapingDependences(Inst); } } @@ -3517,10 +3477,15 @@ std::string IdName = "__polly_array_ref_" + std::to_string(Identifier); isl_id *Id = isl_id_alloc(ctx, IdName.c_str(), nullptr); - bool isApproximated = - Stmt->isRegionStmt() && (Stmt->getRegion()->getEntry() != BB); - if (isApproximated && Type == MemoryAccess::MUST_WRITE) - Type = MemoryAccess::MAY_WRITE; + // The execution of an explicit store is not guaranteed if not in the entry + // block of a subregion. By contrast, implicit writes must occur in + // well-formed IR code.
+ if (Origin == MemoryAccess::EXPLICIT) { + bool isApproximated = + Stmt->isRegionStmt() && (Stmt->getRegion()->getEntry() != BB); + if (isApproximated && Type == MemoryAccess::MUST_WRITE) + Type = MemoryAccess::MAY_WRITE; + } AccList.emplace_back(Stmt, Inst, Id, Type, BaseAddress, ElemBytes, Affine, Subscripts, Sizes, AccessValue, Origin, BaseName); @@ -3539,46 +3504,115 @@ if (Acc) Acc->getStatement()->addExplicitAccess(Acc); } -void ScopInfo::addScalarWriteAccess(Instruction *Value) { + +void ScopInfo::ensureScalarStore(Instruction *Value) { + ScopStmt *Stmt = scop->getStmtForBasicBlock(Value->getParent()); + + // Value not defined within the SCoP. + if (!Stmt) + return; + + // Do not process further if the value is already written. + if (Stmt->lookupScalarWriteOf(Value)) + return; + MemoryAccess *Acc = addMemoryAccess(Value->getParent(), Value, MemoryAccess::MUST_WRITE, Value, 1, true, Value, ArrayRef(), ArrayRef(), MemoryAccess::SCALAR); if (Acc) - Acc->getStatement()->addTrailingWrite(Acc); + Stmt->addScalarWrite(Acc); } -void ScopInfo::addScalarReadAccess(Value *Value, Instruction *User) { - assert(!isa(User)); + +void ScopInfo::ensureScalarReload(Value *Value, BasicBlock *UserBB) { + + // There cannot be an "access" for constants. + if (isa(Value)) + return; + + // If the instruction can be synthesized and the user is in the region we do + // not need to add scalar dependences. + auto &ScopRegion = scop->getRegion(); + if (canSynthesize(Value, LI, SE, &ScopRegion)) + return; + + // Determine the ScopStmt containing the value's definition and use. There is + // no defining ScopStmt if the value is a function argument, a global + // value, or defined outside the scop. + Instruction *ValueInst = dyn_cast(Value); + ScopStmt *ValueStmt = + ValueInst ? scop->getStmtForBasicBlock(ValueInst->getParent()) : nullptr; + ScopStmt *UserStmt = scop->getStmtForBasicBlock(UserBB); + + // We do not model uses outside the scop.
+ if (!UserStmt) + return; + + // Add MemoryAccess for invariant values only if requested. + if (!ModelReadOnlyScalars && !ValueStmt) + return; + + // Ignore use-def chains within the same ScopStmt. + if (ValueStmt == UserStmt) + return; + + // Do not create another MemoryAccess for reloading the value if one already + // exists. + if (UserStmt->lookupScalarReadOf(Value)) + return; + MemoryAccess *Acc = addMemoryAccess( - User->getParent(), User, MemoryAccess::READ, Value, 1, true, Value, + UserBB, nullptr, MemoryAccess::READ, Value, 1, true, Value, ArrayRef(), ArrayRef(), MemoryAccess::SCALAR); - if (Acc) - Acc->getStatement()->addLeadingLoad(Acc); + if (!Acc) + return; + + UserStmt->addScalarRead(Acc); + + // If we load the value, we should also ensure that it is written. + if (ValueStmt) + ensureScalarStore(ValueInst); } -void ScopInfo::addScalarReadAccess(Value *Value, PHINode *User, - BasicBlock *UserBB) { + +void ScopInfo::ensurePHIWriteAccess(PHINode *PHI, BasicBlock *IncomingBlock, + Value *IncomingValue, bool IsExitBlock) { + + ScopStmt *PHIStmt = scop->getStmtForBasicBlock(PHI->getParent()); + ScopStmt *IncomingStmt = scop->getStmtForBasicBlock(IncomingBlock); + + assert(IsExitBlock == !PHIStmt); + if (!IncomingStmt) + return; + + // Make sure the incoming value is available in the incoming block. + // This must be done before the check for multiple PHI writes because multiple + // exiting edges from the subregion each can be the effective written value of + // the subregion. As such, all of them must be made available in the subregion + // statement. + ensureScalarReload(IncomingValue, IncomingBlock); + + // Do not add more than one MemoryAccess per PHINode and ScopStmt. + if (IncomingStmt->lookupPHIWriteOf(PHI)) + return; + + // TODO: Only the first PHI Write will be added to the statement, hence the + // selected IncomingBlock depends on the order in the PHI instruction. Better + // choose a representative (or nullptr?)
for which the order does not matter. MemoryAccess *Acc = addMemoryAccess( - UserBB, User, MemoryAccess::READ, Value, 1, true, Value, - ArrayRef(), ArrayRef(), MemoryAccess::SCALAR); - if (Acc) - Acc->getStatement()->addLeadingLoad(Acc); -} -void ScopInfo::addPHIWriteAccess(PHINode *PHI, BasicBlock *IncomingBlock, - Value *IncomingValue, bool IsExitBlock) { - MemoryAccess *Acc = - addMemoryAccess(IncomingBlock, IncomingBlock->getTerminator(), - MemoryAccess::MUST_WRITE, PHI, 1, true, IncomingValue, - ArrayRef(), ArrayRef(), - IsExitBlock ? MemoryAccess::SCALAR : MemoryAccess::PHI); - if (Acc) - Acc->getStatement()->addTrailingWrite(Acc); + IncomingBlock, IncomingBlock->getTerminator(), MemoryAccess::MUST_WRITE, + PHI, 1, true, IncomingStmt->isRegionStmt() ? PHI : IncomingValue, + ArrayRef(), ArrayRef(), + IsExitBlock ? MemoryAccess::SCALAR : MemoryAccess::PHI); + assert(Acc); + IncomingStmt->addPHIWrite(Acc); } + void ScopInfo::addPHIReadAccess(PHINode *PHI) { MemoryAccess *Acc = addMemoryAccess( PHI->getParent(), PHI, MemoryAccess::READ, PHI, 1, true, PHI, ArrayRef(), ArrayRef(), MemoryAccess::PHI); if (Acc) - Acc->getStatement()->addLeadingLoad(Acc); + Acc->getStatement()->addPHIRead(Acc); } void ScopInfo::buildScop(Region &R, DominatorTree &DT) { Index: lib/CodeGen/BlockGenerators.cpp =================================================================== --- lib/CodeGen/BlockGenerators.cpp +++ lib/CodeGen/BlockGenerators.cpp @@ -417,6 +417,9 @@ if (!ScalarValueInst) return ScalarValue; + if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst)) + return /* Case (3a) */ ScalarValueCopy; + if (!R.contains(ScalarValueInst)) { if (Value *ScalarValueCopy = GlobalMap.lookup(ScalarValueInst)) return /* Case (3a) */ ScalarValueCopy; @@ -424,9 +427,6 @@ return /* Case 2 */ ScalarValue; } - if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst)) - return /* Case (3a) */ ScalarValueCopy; - if ((Stmt.isBlockStmt() && Stmt.getBasicBlock() == ScalarValueInst->getParent()) 
|| (Stmt.isRegionStmt() && Stmt.getRegion()->contains(ScalarValueInst))) { @@ -448,10 +448,6 @@ ValueMapT &BBMap) { const Region &R = Stmt.getParent()->getRegion(); - assert(Stmt.isBlockStmt() && "Region statements need to use the " - "generateScalarStores() function in the " - "RegionGenerator"); - for (MemoryAccess *MA : Stmt.getTrailingWrites()) { assert(MA->isImplicit()); assert(MA->isWrite()); @@ -1118,45 +1114,35 @@ // Continue generating code in the exit block. Builder.SetInsertPoint(ExitBBCopy->getFirstInsertionPt()); - // Write values visible to other statements. - generateScalarStores(Stmt, LTS, ValueMap); -} - -void RegionGenerator::generateScalarStores(ScopStmt &Stmt, LoopToScevMapT <S, - ValueMapT &BBMap) { - const Region &R = Stmt.getParent()->getRegion(); - - assert(Stmt.getRegion() && - "Block statements need to use the generateScalarStores() " - "function in the BlockGenerator"); - - for (MemoryAccess *MA : Stmt) { - if (MA->isExplicit() || MA->isRead()) + // Create PHIs when visible after this subregion. This ensures that there is + // only one store at the subregions's exit instead of multiple in the exiting + // blocks. + for (MemoryAccess *MA : Stmt.getTrailingWrites()) { + // Only continue for .phiops writes and SCALAR accesses for PHIs in the + // SCoP's exit node. + if (!isa(MA->getAccessInstruction())) continue; - Instruction *ScalarInst = MA->getAccessInstruction(); - Value *Val = MA->getAccessValue(); - - // In case we add the store into an exiting block, we need to restore the - // position for stores in the exit node. - auto SavedInsertionPoint = Builder.GetInsertPoint(); - - // Implicit writes induced by PHIs must be written in the incoming blocks. 
- if (isa(ScalarInst)) { - BasicBlock *ExitingBB = ScalarInst->getParent(); - BasicBlock *ExitingBBCopy = BlockMap[ExitingBB]; - Builder.SetInsertPoint(ExitingBBCopy->getTerminator()); + PHINode *PHI = cast(MA->getBaseAddr()); + + // If the PHI was in the SCoP's exit node, region simplification and loop + // versioning will have separated the exit block and the PHINode instance. + // Here, find the PHINode instance in the subregion's exit block. + PHINode *RegionExitPHI = PHI; + if (!Stmt.getParent()->getRegion().contains(PHI)) { + assert(PHI->getNumIncomingValues() == 1); + RegionExitPHI = cast(PHI->getIncomingValue(0)); + if (ValueMap.count(RegionExitPHI)) { + ValueMap[PHI] = ValueMap[RegionExitPHI]; + continue; + } } - auto Address = getOrCreateAlloca(*MA); - - Val = getNewScalarValue(Val, R, Stmt, LTS, BBMap); - Builder.CreateStore(Val, Address); - - // Restore the insertion point if necessary. - if (isa(ScalarInst)) - Builder.SetInsertPoint(SavedInsertionPoint); + ValueMap[PHI] = copyPHIInstruction(Stmt, RegionExitPHI, ValueMap, LTS); } + + // Write values visible to other statements. + generateScalarStores(Stmt, LTS, ValueMap); } void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, const PHINode *PHI, @@ -1201,9 +1187,9 @@ PHICopy->addIncoming(OpCopy, BBCopy); } -Value *RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, PHINode *PHI, - ValueMapT &BBMap, - LoopToScevMapT <S) { +PHINode *RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, PHINode *PHI, + ValueMapT &BBMap, + LoopToScevMapT <S) { unsigned NumIncoming = PHI->getNumIncomingValues(); PHINode *PHICopy = Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." 
+ PHI->getName()); Index: test/DependenceInfo/different_schedule_dimensions.ll =================================================================== --- test/DependenceInfo/different_schedule_dimensions.ll +++ test/DependenceInfo/different_schedule_dimensions.ll @@ -2,7 +2,7 @@ ; RUN: -analyze < %s | FileCheck %s ; CHECK: RAW dependences: -; CHECK: { Stmt_bb9[0] -> Stmt_bb10[0] } +; CHECK: { Stmt_bb3[0] -> Stmt_bb10[0]; Stmt_bb9[0] -> Stmt_bb10[0] } ; CHECK: WAR dependences: ; CHECK: { Stmt_bb3[0] -> Stmt_bb10[0] } ; CHECK: WAW dependences: Index: test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll =================================================================== --- test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll +++ test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll @@ -10,13 +10,14 @@ ; CHECK: br i1 %p_tmp8, label %polly.stmt.bb9, label %polly.stmt.bb10 ; CHECK: polly.stmt.bb9: ; preds = %polly.stmt.bb3 -; CHECK: store double 1.000000e+00, double* %tmp12.phiops ; CHECK: br label %polly.stmt.bb11.exit ; CHECK: polly.stmt.bb10: ; preds = %polly.stmt.bb3 -; CHECK: store double 2.000000e+00, double* %tmp12.phiops ; CHECK: br label %polly.stmt.bb11.exit +; CHECK: polly.stmt.bb11.exit: ; preds = %polly.stmt.bb10, %polly.stmt.bb9 +; CHECK: %polly.tmp12 = phi double [ 1.000000e+00, %polly.stmt.bb9 ], [ 2.000000e+00, %polly.stmt.bb10 ] +; CHECK: store double %polly.tmp12, double* %tmp12.phiops define void @hoge(i32 %arg, [1024 x double]* %arg1) { bb: Index: test/Isl/CodeGen/non-affine-phi-node-expansion-3.ll =================================================================== --- test/Isl/CodeGen/non-affine-phi-node-expansion-3.ll +++ test/Isl/CodeGen/non-affine-phi-node-expansion-3.ll @@ -18,23 +18,23 @@ ; CHECK-NEXT: %p_val1 = fadd float 1.000000e+00, 2.000000e+00 ; CHECK-NEXT: %p_val2 = fadd float 1.000000e+00, 2.000000e+00 ; CHECK-NEXT: %polly.subregion.iv.inc = add i32 %polly.subregion.iv, 1 -; CHECK-NEXT: store float %p_val0, float* %merge.phiops ; CHECK-NEXT: 
br i1 branch1: br i1 %cond1, label %branch2, label %backedge ; CHECK-LABEL: polly.stmt.branch1: -; CHECK-NEXT: store float %p_val1, float* %merge.phiops ; CHECK-NEXT: br i1 branch2: br label %backedge ; CHECK-LABEL: polly.stmt.branch2: -; CHECK-NEXT: store float %p_val2, float* %merge.phiops ; CHECK-NEXT: br label +; CHECK-LABEL: polly.stmt.backedge.exit: +; CHECK: %polly.merge = phi float [ %p_val0, %polly.stmt.loop ], [ %p_val1, %polly.stmt.branch1 ], [ %p_val2, %polly.stmt.branch2 ] + backedge: %merge = phi float [%val0, %loop], [%val1, %branch1], [%val2, %branch2] %indvar.next = add i64 %indvar, 1 Index: test/Isl/CodeGen/non-affine-phi-node-expansion-4.ll =================================================================== --- test/Isl/CodeGen/non-affine-phi-node-expansion-4.ll +++ test/Isl/CodeGen/non-affine-phi-node-expansion-4.ll @@ -16,7 +16,6 @@ ; CHECK-NEXT: %p_val0 = fadd float 1.000000e+00, 2.000000e+00 ; CHECK-NEXT: %p_val1 = fadd float 1.000000e+00, 2.000000e+00 ; CHECK-NEXT: %polly.subregion.iv.inc = add i32 %polly.subregion.iv, 1 -; CHECK-NEXT: store float %p_val0, float* %merge.phiops ; CHECK-NEXT: br i1 ; The interesting instruction here is %val2, which does not dominate the exit of @@ -29,16 +28,17 @@ ; CHECK-LABEL: polly.stmt.branch1: ; CHECK-NEXT: %p_val2 = fadd float 1.000000e+00, 2.000000e+00 -; CHECK-NEXT: store float %p_val1, float* %merge.phiops ; CHECK-NEXT: br i1 branch2: br label %backedge ; CHECK-LABEL: polly.stmt.branch2: -; CHECK-NEXT: store float %p_val2, float* %merge.phiops ; CHECK-NEXT: br label +; CHECK-LABEL: polly.stmt.backedge.exit: +; CHECK: %polly.merge = phi float [ %p_val0, %polly.stmt.loop ], [ %p_val1, %polly.stmt.branch1 ], [ %p_val2, %polly.stmt.branch2 ] + backedge: %merge = phi float [%val0, %loop], [%val1, %branch1], [%val2, %branch2] %indvar.next = add i64 %indvar, 1 Index: test/ScopInfo/NonAffine/non_affine_loop_used_later.ll =================================================================== --- 
test/ScopInfo/NonAffine/non_affine_loop_used_later.ll +++ test/ScopInfo/NonAffine/non_affine_loop_used_later.ll @@ -37,14 +37,10 @@ ; CHECK: [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_A[i0] }; ; CHECK: MayWriteAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_A[i0] }; -; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_smax[] }; -; CHECK: MayWriteAccess := [Reduction Type: NONE] [Scalar: 1] +; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK: [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_j_2__phi[] }; ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK: [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_j_0[] }; -; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_j_2__phi[] }; ; CHECK: Stmt_bb18 ; CHECK: Schedule := ; CHECK: [N] -> { Stmt_bb18[i0] -> [i0, 2] }; Index: test/ScopInfo/intra-non-affine-stmt-phi-node.ll =================================================================== --- test/ScopInfo/intra-non-affine-stmt-phi-node.ll +++ test/ScopInfo/intra-non-affine-stmt-phi-node.ll @@ -10,10 +10,6 @@ ; CHECK-NEXT: { Stmt_loop__TO__backedge[i0] -> [i0, 0] }; ; CHECK-NEXT: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK-NEXT: { Stmt_loop__TO__backedge[i0] -> MemRef_merge__phi[] }; -; CHECK-NEXT: MayWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK-NEXT: { Stmt_loop__TO__backedge[i0] -> MemRef_merge__phi[] }; -; CHECK-NEXT: MayWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK-NEXT: { Stmt_loop__TO__backedge[i0] -> MemRef_merge__phi[] }; ; CHECK-NEXT: Stmt_backedge ; CHECK-NEXT: Domain := ; CHECK-NEXT: { Stmt_backedge[i0] : i0 <= 100 and i0 >= 0 }; Index: test/ScopInfo/many-scalar-dependences.ll =================================================================== --- test/ScopInfo/many-scalar-dependences.ll +++ test/ScopInfo/many-scalar-dependences.ll 
@@ -91,12 +91,12 @@ ; CHECK: { Stmt_bb12[i0, i1, i2] -> [i0, 2, i1, 2, i2, 3] }; ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK: { Stmt_bb12[i0, i1, i2] -> MemRef_x_3__phi[] }; -; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: { Stmt_bb12[i0, i1, i2] -> MemRef_x_3[] }; ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: { Stmt_bb12[i0, i1, i2] -> MemRef_a[i0, i1] }; ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: { Stmt_bb12[i0, i1, i2] -> MemRef_a[i0, i1] }; +; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] +; CHECK: { Stmt_bb12[i0, i1, i2] -> MemRef_x_3[] }; ; CHECK: Stmt_bb16 ; CHECK: Domain := ; CHECK: { Stmt_bb16[i0, i1, i2] : i0 <= 99 and i0 >= 0 and i1 <= 99 and i1 >= 0 and i2 <= 99 and i2 >= 0 }; Index: test/ScopInfo/non_affine_region_2.ll =================================================================== --- test/ScopInfo/non_affine_region_2.ll +++ test/ScopInfo/non_affine_region_2.ll @@ -35,10 +35,6 @@ ; CHECK-NEXT: { Stmt_bb3__TO__bb18[i0] -> MemRef_A[i0] }; ; CHECK-NOT: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_0[] }; ; CHECK-NOT: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_1[] }; -; CHECK: MayWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK-NEXT: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] }; -; CHECK-NOT: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_0[] }; -; CHECK-NOT: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_1[] }; ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK-NEXT: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] }; ; CHECK-NOT: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_0[] }; Index: test/ScopInfo/non_affine_region_3.ll =================================================================== --- test/ScopInfo/non_affine_region_3.ll +++ test/ScopInfo/non_affine_region_3.ll @@ -31,12 +31,6 @@ ; CHECK: { Stmt_bb3__TO__bb18[i0] -> [i0, 0] }; ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: { Stmt_bb3__TO__bb18[i0] -> MemRef_A[i0] }; -; 
CHECK: MayWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] }; -; CHECK: MayWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] }; -; CHECK: MayWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] }; ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK: { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] }; ; CHECK: Stmt_bb18 Index: test/ScopInfo/non_affine_region_4.ll =================================================================== --- test/ScopInfo/non_affine_region_4.ll +++ test/ScopInfo/non_affine_region_4.ll @@ -16,14 +16,14 @@ ; ; CHECK: Arrays { ; CHECK: i32 MemRef_A[*][4] -; CHECK: i32 MemRef_x[*] [BasePtrOrigin: MemRef_A] ; CHECK: i32 MemRef_y__phi[*] +; CHECK: i32 MemRef_x[*] [BasePtrOrigin: MemRef_A] ; CHECK: } ; ; CHECK: Arrays (Bounds as pw_affs) { ; CHECK: i32 MemRef_A[*][ { [] -> [(4)] } ] -; CHECK: i32 MemRef_x[*] [BasePtrOrigin: MemRef_A] ; CHECK: i32 MemRef_y__phi[*] +; CHECK: i32 MemRef_x[*] [BasePtrOrigin: MemRef_A] ; CHECK: } ; ; CHECK: Stmt_bb2__TO__bb7 @@ -38,11 +38,9 @@ ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: { Stmt_bb2__TO__bb7[i0] -> MemRef_A[i0] }; ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: { Stmt_bb2__TO__bb7[i0] -> MemRef_x[] }; -; CHECK: MayWriteAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK: { Stmt_bb2__TO__bb7[i0] -> MemRef_y__phi[] }; ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: { Stmt_bb2__TO__bb7[i0] -> MemRef_y__phi[] }; +; CHECK: { Stmt_bb2__TO__bb7[i0] -> MemRef_x[] }; ; CHECK: Stmt_bb7 ; CHECK: Domain := ; CHECK: { Stmt_bb7[i0] : @@ -53,9 +51,9 @@ ; CHECK: Schedule := ; CHECK: { Stmt_bb7[i0] -> [i0, 1] }; ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: { Stmt_bb7[i0] -> MemRef_x[] }; -; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] ; 
CHECK: { Stmt_bb7[i0] -> MemRef_y__phi[] }; +; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] +; CHECK: { Stmt_bb7[i0] -> MemRef_x[] }; ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: { Stmt_bb7[i0] -> MemRef_A[i0] }; ; Index: test/ScopInfo/phi_condition_modeling_2.ll =================================================================== --- test/ScopInfo/phi_condition_modeling_2.ll +++ test/ScopInfo/phi_condition_modeling_2.ll @@ -32,12 +32,12 @@ ; CHECK-NOT: Access ; CHECK-LABEL: Stmt_bb8b ; CHECK-NOT: Access -; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: [N, c] -> { Stmt_bb8b[i0] -> MemRef_tmp_0[] }; -; CHECK-NOT: Access ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: [N, c] -> { Stmt_bb8b[i0] -> MemRef_A[i0] }; ; CHECK-NOT: Access +; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] +; CHECK: [N, c] -> { Stmt_bb8b[i0] -> MemRef_tmp_0[] }; +; CHECK-NOT: Access ; CHECK: } target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" Index: test/ScopInfo/phi_loop_carried_float.ll =================================================================== --- test/ScopInfo/phi_loop_carried_float.ll +++ test/ScopInfo/phi_loop_carried_float.ll @@ -21,10 +21,10 @@ ; CHECK: [N] -> { Stmt_bb4[i0] -> MemRef_tmp_0__phi[] }; ; CHECK-NOT: Access ; CHECK: ReadAccess := [Reduction Type: NONE] -; CHECK: [N] -> { Stmt_bb4[i0] -> MemRef_tmp_0[] }; +; CHECK: [N] -> { Stmt_bb4[i0] -> MemRef_A[i0] }; ; CHECK-NOT: Access ; CHECK: ReadAccess := [Reduction Type: NONE] -; CHECK: [N] -> { Stmt_bb4[i0] -> MemRef_A[i0] }; +; CHECK: [N] -> { Stmt_bb4[i0] -> MemRef_tmp_0[] }; ; CHECK-NOT: Access ; CHECK: } target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" Index: test/ScopInfo/phi_scalar_simple_1.ll =================================================================== --- test/ScopInfo/phi_scalar_simple_1.ll +++ test/ScopInfo/phi_scalar_simple_1.ll @@ -29,9 +29,6 @@ ; CHECK: MustWriteAccess := [Reduction Type: 
NONE] [Scalar: 1] ; CHECK: [N] -> { Stmt_for_cond[i0] -> MemRef_x_addr_0[] }; ; CHECK-NOT: Access -; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: [N] -> { Stmt_for_cond[i0] -> MemRef_x_addr_0[] }; -; CHECK-NOT: Access %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc4 ], [ 1, %entry ] %x.addr.0 = phi i32 [ %x, %entry ], [ %x.addr.1.lcssa, %for.inc4 ] %cmp = icmp slt i64 %indvars.iv, %tmp @@ -71,12 +68,12 @@ ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK: [N] -> { Stmt_for_inc[i0, i1] -> MemRef_x_addr_1__phi[] }; ; CHECK-NOT: Access -; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: [N] -> { Stmt_for_inc[i0, i1] -> MemRef_x_addr_1[] }; -; CHECK-NOT: Access ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: [N] -> { Stmt_for_inc[i0, i1] -> MemRef_A[1 + i0] }; ; CHECK-NOT: Access +; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] +; CHECK: [N] -> { Stmt_for_inc[i0, i1] -> MemRef_x_addr_1[] }; +; CHECK-NOT: Access %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv %tmp1 = load i32, i32* %arrayidx, align 4 %add = add nsw i32 %x.addr.1, %tmp1 Index: test/ScopInfo/phi_scalar_simple_2.ll =================================================================== --- test/ScopInfo/phi_scalar_simple_2.ll +++ test/ScopInfo/phi_scalar_simple_2.ll @@ -28,9 +28,6 @@ ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: [N, c] -> { Stmt_for_cond[i0] -> MemRef_A[i0] }; ; CHECK-NOT: Access -; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: [N, c] -> { Stmt_for_cond[i0] -> MemRef_x_addr_0[] }; -; CHECK-NOT: Access %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc5 ], [ 0, %entry ] %x.addr.0 = phi i32 [ %x, %entry ], [ %x.addr.1, %for.inc5 ] %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv @@ -58,12 +55,6 @@ ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK: [N, c] -> { Stmt_for_cond1[i0, i1] -> 
MemRef_x_addr_1__phi[] }; ; CHECK-NOT: Access -; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: [N, c] -> { Stmt_for_cond1[i0, i1] -> MemRef_x_addr_1[] }; -; CHECK-NOT: Access -; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: [N, c] -> { Stmt_for_cond1[i0, i1] -> MemRef_x_addr_1[] }; -; CHECK-NOT: Access %x.addr.1 = phi i32 [ %x.addr.0, %for.body ], [ %x.addr.2, %for.inc ] %j.0 = phi i32 [ 0, %for.body ], [ %inc, %for.inc ] %exitcond = icmp ne i32 %j.0, %N @@ -84,12 +75,12 @@ if.then: ; preds = %for.body3 ; CHECK-LABEL: Stmt_if_then ; CHECK-NOT: Access -; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK: [N, c] -> { Stmt_if_then[i0, i1] -> MemRef_x_addr_1[] }; -; CHECK-NOT: Access ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: [N, c] -> { Stmt_if_then[i0, i1] -> MemRef_A[i0] }; ; CHECK-NOT: Access +; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] +; CHECK: [N, c] -> { Stmt_if_then[i0, i1] -> MemRef_x_addr_1[] }; +; CHECK-NOT: Access ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK: [N, c] -> { Stmt_if_then[i0, i1] -> MemRef_x_addr_2__phi[] }; ; CHECK-NOT: Access Index: test/ScopInfo/read-only-scalar-used-in-phi.ll =================================================================== --- test/ScopInfo/read-only-scalar-used-in-phi.ll +++ test/ScopInfo/read-only-scalar-used-in-phi.ll @@ -18,6 +18,8 @@ ; CHECK: { Stmt_next[] -> MemRef_sum[] }; ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] ; CHECK: { Stmt_next[] -> MemRef_phisum__phi[] }; +; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1] +; CHECK: { Stmt_next[] -> MemRef_phisummerge[] }; ; CHECK: Stmt_bb1 ; CHECK: Domain := ; CHECK: { Stmt_bb1[i0] : i0 <= 100 and i0 >= 0 }; @@ -29,6 +31,8 @@ ; CHECK: { Stmt_bb1[i0] -> MemRef_phisum__phi[] }; ; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK: { Stmt_bb1[i0] -> MemRef_A[i0] }; +; CHECK: MustWriteAccess := 
[Reduction Type: NONE] [Scalar: 1] +; CHECK: { Stmt_bb1[i0] -> MemRef_phisummerge[] }; target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" Index: test/ScopInfo/scalar.ll =================================================================== --- test/ScopInfo/scalar.ll +++ test/ScopInfo/scalar.ll @@ -53,7 +53,7 @@ ; CHECK: [N] -> { Stmt_S2[i0] : i0 >= 0 and i0 <= -1 + N }; ; CHECK: Schedule := ; CHECK: [N] -> { Stmt_S2[i0] -> [i0, 1] }; -; CHECK: ReadAccess := -; CHECK: [N] -> { Stmt_S2[i0] -> MemRef_val[] }; ; CHECK: MustWriteAccess := ; CHECK: [N] -> { Stmt_S2[i0] -> MemRef_a[i0] }; +; CHECK: ReadAccess := +; CHECK: [N] -> { Stmt_S2[i0] -> MemRef_val[] }; Index: test/ScopInfo/scalar_dependence_cond_br.ll =================================================================== --- test/ScopInfo/scalar_dependence_cond_br.ll +++ test/ScopInfo/scalar_dependence_cond_br.ll @@ -7,6 +7,7 @@ ; } ; ; FIXME: This test is a negative test until we have an independent blocks alternative. +; XFAIL: * ; ; We should move operands as close to their use as possible, hence in this case ; there should not be any scalar dependence anymore after %cmp1 is moved to Index: test/ScopInfo/tempscop-printing.ll =================================================================== --- test/ScopInfo/tempscop-printing.ll +++ test/ScopInfo/tempscop-printing.ll @@ -77,10 +77,10 @@ %indvar.j = phi i64 [ 0, %entry.next ], [ %indvar.j.next, %for.j ] %scevgep = getelementptr i64, i64* %A, i64 %indvar.j store i64 %init, i64* %scevgep -; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] -; CHECK-NEXT: [N] -> { Stmt_for_j[i0, i1] -> MemRef_init[] }; ; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 0] ; CHECK-NEXT: [N] -> { Stmt_for_j[i0, i1] -> MemRef_A[i1] }; +; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 1] +; CHECK-NEXT: [N] -> { Stmt_for_j[i0, i1] -> MemRef_init[] }; %indvar.j.next = add nsw i64 %indvar.j, 1 %exitcond.j = icmp eq i64 %indvar.j.next, %N br i1 %exitcond.j, label 
%for.i.end, label %for.j