Index: include/polly/CodeGen/BlockGenerators.h
===================================================================
--- include/polly/CodeGen/BlockGenerators.h
+++ include/polly/CodeGen/BlockGenerators.h
@@ -795,7 +795,7 @@
   ///                  (for values recalculated within this basic block).
   /// @param LTS       A map from old loops to new induction variables as SCEVs.
   ///
-  /// @returns The copied instruction or nullptr if no copy was made.
+  /// @returns The copied PHI.
   virtual Value *copyPHIInstruction(ScopStmt &Stmt, PHINode *Inst,
                                     ValueMapT &BBMap,
                                     LoopToScevMapT &LTS) override;
Index: include/polly/ScopInfo.h
===================================================================
--- include/polly/ScopInfo.h
+++ include/polly/ScopInfo.h
@@ -322,8 +322,8 @@
   /// |  use float %V.reload1            |  |  use float %V.reload2            |
   /// ------------------------------------  ------------------------------------
   ///
-  /// #AccessInst is either the llvm::Value for WRITEs or the value's user for
-  /// READS. The #BaseAddr is represented by the value's definition (i.e. the
+  /// #AccessInst is either the llvm::Value for WRITEs or nullptr for reads.
+  /// The #BaseAddr is represented by the value's definition (i.e. the
   /// llvm::Value itself) as no such alloca yet exists before CodeGeneration.
   /// #AccessValue is also the llvm::Value itself.
   ///
@@ -355,8 +355,10 @@
   ///               -----------------------------------------
   ///
   /// Since the stores and loads do not exist in the analyzed code, the
-  /// #AccessInst of a load is the PHIInst and a incoming block's terminator for
-  /// stores. The #BaseAddr is represented through the PHINode because there
+  /// #AccessInst of a load is the PHIInst and a nullptr for stores (it can
+  /// represent multiple values that are leaving a non-affine region with
+  /// multiple exiting edges). The #BaseAddr is represented through the PHINode
+  /// because there
   /// also such alloca in the analyzed code. The #AccessValue is represented by
   /// the PHIInst itself.
   ///
@@ -481,6 +483,9 @@
   /// @brief The access instruction of this memory access.
   Instruction *AccessInstruction;
 
+  /// @brief Incoming block and value of a PHINode.
+  SmallVector<std::pair<BasicBlock *, Value *>, 4> Incoming;
+
   /// @brief The value associated with this memory access.
   ///
   ///  - For real memory accesses it is the loaded result or the stored value.
@@ -582,6 +587,16 @@
                Value *AccessValue, AccessOrigin Origin, StringRef BaseName);
   ~MemoryAccess();
 
+  void addIncoming(BasicBlock *IncomingBlock, Value *IncomingValue) {
+    assert(isAnyPHI());
+    Incoming.emplace_back(std::make_pair(IncomingBlock, IncomingValue));
+  }
+
+  ArrayRef<std::pair<BasicBlock *, Value *>> getIncoming() const {
+    assert(isAnyPHI());
+    return Incoming;
+  }
+
   /// @brief Get the type of a memory access.
   enum AccessType getType() { return AccType; }
 
@@ -690,6 +705,9 @@
   /// SCoP's exit block?
   bool isExitPHI() const { return Origin == EXIT_PHI; }
 
+  /// @brief Does this access originate from one of the two PHI types?
+  bool isAnyPHI() const { return isPHI() || isExitPHI(); }
+
   /// @brief Get the statement that contains this memory access.
   ScopStmt *getStatement() const { return Statement; }
 
@@ -815,6 +833,23 @@
   /// @bried Implicit stores at the end of a statement.
   MemoryAccessVec TrailingWrites;
 
+  /// @brief The set of values defined elsewhere required in this ScopStmt and
+  ///        their SCALAR READ MemoryAccesses.
+  DenseMap<Value *, MemoryAccess *> ScalarReads;
+
+  /// @brief The set of values defined in this ScopStmt that are required
+  ///        elsewhere and their SCALAR WRITE MemoryAccesses.
+  DenseMap<Instruction *, MemoryAccess *> ScalarWrites;
+
+  /// @brief If this ScopStmt is an incoming block of a PHI node, its incoming
+  ///        value needs to be written as a trailing write.
+  ///
+  /// Non-affine subregions can
+  /// have multiple exiting blocks that are incoming blocks of the PHI nodes.
+  /// This map ensures that there is only one write operation for the complete
+  /// subregion. A PHI selecting the relevant value will be inserted.
+  DenseMap<PHINode *, MemoryAccess *> PHIWrites;
+
   //@}
 
   /// @brief A SCoP statement represents either a basic block (affine/precise
@@ -979,6 +1014,27 @@
   /// @brief Return the vector of trailing implicit stores.
   const MemoryAccessVec &getTrailingWrites() const { return TrailingWrites; }
 
+  /// @brief Return the MemoryAccess that reloads a value, or nullptr if not
+  ///        (yet) added.
+  MemoryAccess *lookupScalarReadOf(Value *Inst) const {
+    return ScalarReads.lookup(Inst);
+  }
+
+  /// @brief Return the MemoryAccess that writes the value of an instruction
+  ///        defined in this block, or nullptr if not (yet) added.
+  MemoryAccess *lookupScalarWriteOf(Instruction *Inst) const {
+    assert((isRegionStmt() && R->contains(Inst)) ||
+           (!isRegionStmt() && Inst->getParent() == BB));
+    return ScalarWrites.lookup(Inst);
+  }
+
+  /// @brief Return the PHI write MemoryAccess for the incoming values from any
+  ///        basic block in this ScopStmt, or nullptr if not (yet) added.
+  MemoryAccess *lookupPHIWriteOf(PHINode *PHI) const {
+    assert(isBlockStmt() || R->getExit() == PHI->getParent());
+    return PHIWrites.lookup(PHI);
+  }
+
   void setBasicBlock(BasicBlock *Block) {
     // TODO: Handle the case where the statement is a region statement, thus
     //       the entry block was split and needs to be changed in the region R.
@@ -986,9 +1042,21 @@
     BB = Block;
   }
 
-  /// @brief Add @p Access to this statement's list of accesses.
+  /// @brief Add @p Access to this statement's list of explicit accesses.
   void addExplicitAccess(MemoryAccess *Access);
 
+  /// @brief Add @p Access to this statement's list of SCALAR READ accesses.
+  void addScalarRead(MemoryAccess *Access);
+
+  /// @brief Add @p Access to this statement's list of SCALAR WRITE accesses.
+  void addScalarWrite(MemoryAccess *Access);
+
+  /// @brief Add @p Access to this statement's list of PHI READ accesses.
+  void addPHIRead(MemoryAccess *Access);
+
+  /// @brief Add @p Access to this statement's list of PHI WRITE accesses.
+  void addPHIWrite(MemoryAccess *Access);
+
   /// @brief Add @p Access to this statement's list of implicit loads.
   void addLeadingLoad(MemoryAccess *Access);
 
@@ -1582,6 +1650,11 @@
   /// @brief Return the stmt for the given @p BB or nullptr if none.
   ScopStmt *getStmtForBasicBlock(BasicBlock *BB) const;
 
+  /// @brief Return the ScopStmt that represents @p RN; can return nullptr if
+  /// the RegionNode is not within the SCoP or has been removed due to
+  /// simplifications.
+  ScopStmt *getStmtForRegionNode(RegionNode *RN) const;
+
   /// @brief Return the number of statements in the SCoP.
   size_t getSize() const { return Stmts.size(); }
 
@@ -1778,13 +1851,12 @@
   ///        dataflow dependencies) of an instruction.
   ///
   /// @param Inst               The instruction to be analyzed
-  /// @param R                  The SCoP region
-  /// @param NonAffineSubRegion The non affine sub-region @p Inst is in.
-  ///
-  /// @return     True if the Instruction is used in other BB and a scalar write
-  ///             Access is required.
-  bool buildScalarDependences(Instruction *Inst, Region *R,
-                              Region *NonAffineSubRegio);
+  void buildScalarDependences(Instruction *Inst);
+
+  /// @brief Search for uses of the llvm::Value defined by @p Inst that are not
+  /// within the SCoP. If there is such use, add a SCALAR WRITE such that it is
+  /// available after the SCoP as escaping value.
+  void buildEscapingDependences(Instruction *Inst);
 
   /// @brief Create MemoryAccesses for the given PHI node in the given region.
   ///
@@ -1819,6 +1891,7 @@
 
   /// @brief Create a new MemoryAccess object and add it to #AccFuncMap.
   ///
+  /// @param Stmt        The ScopStmt this access will be added to.
   /// @param BB          The block where the access takes place.
   /// @param Inst        The instruction doing the access. It is not necessarily
   ///                    inside @p BB.
@@ -1833,13 +1906,11 @@
   ///
   /// @return The newly created MemoryAccess instance or NULL if the access
   ///         would have no effect.
-  MemoryAccess *addMemoryAccess(BasicBlock *BB, Instruction *Inst,
-                                MemoryAccess::AccessType Type,
-                                Value *BaseAddress, unsigned ElemBytes,
-                                bool Affine, Value *AccessValue,
-                                ArrayRef<const SCEV *> Subscripts,
-                                ArrayRef<const SCEV *> Sizes,
-                                MemoryAccess::AccessOrigin Origin);
+  MemoryAccess *addMemoryAccess(
+      ScopStmt *Stmt, BasicBlock *BB, Instruction *Inst,
+      MemoryAccess::AccessType Type, Value *BaseAddress, unsigned ElemBytes,
+      bool Affine, Value *AccessValue, ArrayRef<const SCEV *> Subscripts,
+      ArrayRef<const SCEV *> Sizes, MemoryAccess::AccessOrigin Origin);
 
   /// @brief Create a MemoryAccess that represents either a LoadInst or
   /// StoreInst.
@@ -1858,38 +1929,25 @@
                          ArrayRef<const SCEV *> Subscripts,
                          ArrayRef<const SCEV *> Sizes, Value *AccessValue);
 
-  /// @brief Create a MemoryAccess for writing an llvm::Value.
+  /// @brief Ensure that there is a MemoryAccess that writes a value's
+  /// definition.
   ///
-  /// The access will be created at the @p Value's definition.
+  /// If no such access exists yet, it will be created in the ScopStmt where
+  /// @p Value is defined.
   ///
   /// @param Value The value to be written.
   /// @see addScalarReadAccess()
   /// @see AccessOrigin
-  void addScalarWriteAccess(Instruction *Value);
+  void ensureScalarStore(Instruction *Value);
 
-  /// @brief Create a MemoryAccess for reloading an llvm::Value.
-  ///
-  /// Use this overload only for non-PHI instructions.
-  ///
-  /// @param Value The scalar expected to be loaded.
-  /// @param User  User of the scalar; this is where the access is added.
-  /// @see addScalarWriteAccess()
-  /// @see AccessOrigin
-  void addScalarReadAccess(Value *Value, Instruction *User);
-
-  /// @brief Create a MemoryAccess for reloading an llvm::Value.
-  ///
-  /// This is for PHINodes using the scalar. As we model it, the used value must
-  /// be available at the incoming block instead of when hitting the
-  /// instruction.
+  /// @brief Ensure that @p Value will be reloaded in the ScopStmt that
+  /// contains @p UserBB.
   ///
   /// @param Value  The scalar expected to be loaded.
-  /// @param User   The PHI node referencing @p Value.
-  /// @param UserBB Incoming block for the incoming @p Value.
-  /// @see addPHIWriteAccess()
+  /// @param UserBB Where to reload the value.
   /// @see addScalarWriteAccess()
   /// @see AccessOrigin
-  void addScalarReadAccess(Value *Value, PHINode *User, BasicBlock *UserBB);
+  void ensureScalarReload(Value *Value, BasicBlock *UserBB);
 
   /// @brief Create a write MemoryAccess for the incoming block of a phi node.
   ///
@@ -1904,8 +1962,8 @@
   ///                      PHINode in the SCoP region's exit block.
   /// @see addPHIReadAccess()
   /// @see AccessOrigin
-  void addPHIWriteAccess(PHINode *PHI, BasicBlock *IncomingBlock,
-                         Value *IncomingValue, bool IsExitBlock);
+  void ensurePHIWriteAccess(PHINode *PHI, BasicBlock *IncomingBlock,
+                            Value *IncomingValue, bool IsExitBlock);
 
   /// @brief Create a MemoryAccess for reading the value of a phi.
   ///
Index: include/polly/Support/ScopHelper.h
===================================================================
--- include/polly/Support/ScopHelper.h
+++ include/polly/Support/ScopHelper.h
@@ -164,5 +164,14 @@
 ///         otherwise return false.
 bool canSynthesize(const llvm::Value *V, const llvm::LoopInfo *LI,
                    llvm::ScalarEvolution *SE, const llvm::Region *R);
+
+/// @brief Return the block in which a value is used.
+///
+/// For normal instructions, this is the instruction's parent block. For PHI
+/// nodes, this is the incoming block of that use, because this is where the
+/// operand must be defined (i.e. its definition dominates this block).
+/// Non-instructions do not use operands at a specific point such that in this
+/// case this function returns nullptr.
+llvm::BasicBlock *getUseBlock(llvm::Use &U);
 }
 #endif
Index: lib/Analysis/ScopInfo.cpp
===================================================================
--- lib/Analysis/ScopInfo.cpp
+++ lib/Analysis/ScopInfo.cpp
@@ -520,8 +520,9 @@
   // bail out more often than strictly necessary.
   Outside = isl_set_remove_divs(Outside);
   Outside = isl_set_complement(Outside);
-  Statement->getParent()->addAssumption(INBOUNDS, Outside,
-                                        getAccessInstruction()->getDebugLoc());
+  Statement->getParent()->addAssumption(
+      INBOUNDS, Outside,
+      getAccessInstruction() ? getAccessInstruction()->getDebugLoc() : nullptr);
   isl_space_free(Space);
 }
 
@@ -748,7 +749,8 @@
   }
   OS << "[Reduction Type: " << getReductionType() << "] ";
   OS << "[Scalar: " << isImplicit() << "]\n";
-  OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
+  if (AccessRelation)
+    OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
   if (hasNewAccessRelation())
     OS.indent(11) << "new: " << getNewAccessRelationStr() << ";\n";
 }
@@ -885,7 +887,7 @@
       Ty = ScopArrayInfo::KIND_PHI;
     else if (Access->isExitPHI())
       Ty = ScopArrayInfo::KIND_EXIT_PHI;
-    else if (Access->isImplicit())
+    else if (Access->isScalar())
       Ty = ScopArrayInfo::KIND_SCALAR;
     else
       Ty = ScopArrayInfo::KIND_ARRAY;
@@ -924,6 +926,45 @@
   MemAccs.push_back(Access);
 }
 
+void ScopStmt::addScalarRead(MemoryAccess *Access) {
+  assert(Access->isScalar() && Access->isRead());
+
+  Value *AccessVal = Access->getAccessValue();
+  assert(!ScalarReads.lookup(AccessVal));
+
+  ScalarReads[AccessVal] = Access;
+  addLeadingLoad(Access);
+}
+
+void ScopStmt::addScalarWrite(MemoryAccess *Access) {
+  assert(Access->isScalar() && Access->isWrite());
+
+  Instruction *AccessVal = cast<Instruction>(Access->getAccessValue());
+  assert(Parent.getStmtForBasicBlock(AccessVal->getParent()) == this);
+  assert(!ScalarWrites.lookup(cast<Instruction>(AccessVal)));
+
+  ScalarWrites[AccessVal] = Access;
+  addTrailingWrite(Access);
+}
+
+void ScopStmt::addPHIRead(MemoryAccess *Access) {
+  assert(Access->isPHI() && Access->isRead());
+
+  addLeadingLoad(Access);
+}
+
+void ScopStmt::addPHIWrite(MemoryAccess *Access) {
+  assert(Access->isImplicit() && Access->isWrite() &&
+         "The origin must be either PHI or SCALAR (for escaping PHI values in "
+         "the exit block)");
+
+  PHINode *PHI = cast<PHINode>(Access->getBaseAddr());
+  assert(!PHIWrites.lookup(PHI));
+
+  PHIWrites[PHI] = Access;
+  addTrailingWrite(Access);
+}
+
 void ScopStmt::realignParams() {
   for (MemoryAccess *MA : *this)
     MA->realignParams();
@@ -1963,6 +2004,11 @@
                            : RN->getNodeAs<BasicBlock>();
 }
 
+static inline BasicBlock *getScopStmtBasicBlock(ScopStmt *Stmt) {
+  return Stmt->isRegionStmt() ? Stmt->getRegion()->getEntry()
+                              : Stmt->getBasicBlock();
+}
+
 /// @brief Return the @p idx'th block that is executed after @p RN.
 static inline BasicBlock *
 getRegionNodeSuccessor(RegionNode *RN, TerminatorInst *TI, unsigned idx) {
@@ -3339,15 +3385,14 @@
 
 bool Scop::isIgnored(RegionNode *RN) {
   BasicBlock *BB = getRegionNodeBasicBlock(RN);
+  ScopStmt *Stmt = getStmtForRegionNode(RN);
+
+  // If there is no stmt, then it already has been removed.
+  if (!Stmt)
+    return true;
 
   // Check if there are accesses contained.
-  bool ContainsAccesses = false;
-  if (!RN->isSubRegion())
-    ContainsAccesses = getAccessFunctions(BB);
-  else
-    for (BasicBlock *RBB : RN->getNodeAs<Region>()->blocks())
-      ContainsAccesses |= (getAccessFunctions(RBB) != nullptr);
-  if (!ContainsAccesses)
+  if (Stmt->isEmpty())
     return true;
 
   // Check for reachability via non-error blocks.
@@ -3512,6 +3557,10 @@
   return StmtMapIt->second;
 }
 
+ScopStmt *Scop::getStmtForRegionNode(RegionNode *RN) const {
+  return getStmtForBasicBlock(getRegionNodeBasicBlock(RN));
+}
+
 int Scop::getRelativeLoopDepth(const Loop *L) const {
   Loop *OuterLoop =
       L ? R.outermostLoopInRegion(const_cast<Loop *>(L)) : nullptr;
@@ -3534,155 +3583,57 @@
   if (!IsExitBlock && canSynthesize(PHI, LI, SE, &R))
     return;
 
+  // Do not build scalar dependences inside a non-affine subregion. Region
+  // entries can have a mix of incoming edges from within and outside the
+  // region, s.t. a PHI demotion is required as well.
+  // Note: this should be equivalent to comparing the ScopStmts of the PHI's parent and the subregion's entry block.
+  if (NonAffineSubRegion && PHI->getParent() != NonAffineSubRegion->getEntry())
+    return;
+
   // PHI nodes are modeled as if they had been demoted prior to the SCoP
   // detection. Hence, the PHI is a load of a new memory location in which the
   // incoming value was written at the end of the incoming basic block.
-  bool OnlyNonAffineSubRegionOperands = true;
   for (unsigned u = 0; u < PHI->getNumIncomingValues(); u++) {
     Value *Op = PHI->getIncomingValue(u);
     BasicBlock *OpBB = PHI->getIncomingBlock(u);
 
-    // Do not build scalar dependences inside a non-affine subregion.
-    if (NonAffineSubRegion && NonAffineSubRegion->contains(OpBB))
-      continue;
-
-    OnlyNonAffineSubRegionOperands = false;
-
-    if (!R.contains(OpBB))
-      continue;
-
-    Instruction *OpI = dyn_cast<Instruction>(Op);
-    if (OpI) {
-      BasicBlock *OpIBB = OpI->getParent();
-      // As we pretend there is a use (or more precise a write) of OpI in OpBB
-      // we have to insert a scalar dependence from the definition of OpI to
-      // OpBB if the definition is not in OpBB.
-      if (scop->getStmtForBasicBlock(OpIBB) !=
-          scop->getStmtForBasicBlock(OpBB)) {
-        addScalarReadAccess(OpI, PHI, OpBB);
-        addScalarWriteAccess(OpI);
-      }
-    } else if (ModelReadOnlyScalars && !isa<Constant>(Op)) {
-      addScalarReadAccess(Op, PHI, OpBB);
-    }
-
-    addPHIWriteAccess(PHI, OpBB, Op, IsExitBlock);
+    ensurePHIWriteAccess(PHI, OpBB, Op, IsExitBlock);
   }
-
-  if (!OnlyNonAffineSubRegionOperands && !IsExitBlock) {
+  if (!IsExitBlock)
     addPHIReadAccess(PHI);
-  }
 }
 
-bool ScopInfo::buildScalarDependences(Instruction *Inst, Region *R,
-                                      Region *NonAffineSubRegion) {
-  bool canSynthesizeInst = canSynthesize(Inst, LI, SE, R);
-  if (isIgnoredIntrinsic(Inst))
-    return false;
-
-  bool AnyCrossStmtUse = false;
-  BasicBlock *ParentBB = Inst->getParent();
-
-  for (User *U : Inst->users()) {
-    Instruction *UI = dyn_cast<Instruction>(U);
-
-    // Ignore the strange user
-    if (UI == 0)
-      continue;
-
-    BasicBlock *UseParent = UI->getParent();
-
-    // Ignore basic block local uses. A value that is defined in a scop, but
-    // used in a PHI node in the same basic block does not count as basic block
-    // local, as for such cases a control flow edge is passed between definition
-    // and use.
-    if (UseParent == ParentBB && !isa<PHINode>(UI))
-      continue;
-
-    // Uses by PHI nodes in the entry node count as external uses in case the
-    // use is through an incoming block that is itself not contained in the
-    // region.
-    if (R->getEntry() == UseParent) {
-      if (auto *PHI = dyn_cast<PHINode>(UI)) {
-        bool ExternalUse = false;
-        for (unsigned i = 0; i < PHI->getNumIncomingValues(); i++) {
-          if (PHI->getIncomingValue(i) == Inst &&
-              !R->contains(PHI->getIncomingBlock(i))) {
-            ExternalUse = true;
-            break;
-          }
-        }
-
-        if (ExternalUse) {
-          AnyCrossStmtUse = true;
-          continue;
-        }
-      }
-    }
-
-    // Do not build scalar dependences inside a non-affine subregion.
-    if (NonAffineSubRegion && NonAffineSubRegion->contains(UseParent))
-      continue;
-
-    // Check for PHI nodes in the region exit and skip them, if they will be
-    // modeled as PHI nodes.
-    //
-    // PHI nodes in the region exit that have more than two incoming edges need
-    // to be modeled as PHI-Nodes to correctly model the fact that depending on
-    // the control flow a different value will be assigned to the PHI node. In
-    // case this is the case, there is no need to create an additional normal
-    // scalar dependence. Hence, bail out before we register an "out-of-region"
-    // use for this definition.
-    if (isa<PHINode>(UI) && UI->getParent() == R->getExit() &&
-        !R->getExitingBlock())
-      continue;
-
-    // Check whether or not the use is in the SCoP.
-    if (!R->contains(UseParent)) {
-      AnyCrossStmtUse = true;
-      continue;
-    }
+void ScopInfo::buildScalarDependences(Instruction *Inst) {
+  assert(!isa<PHINode>(Inst));
 
-    // If the instruction can be synthesized and the user is in the region
-    // we do not need to add scalar dependences.
-    if (canSynthesizeInst)
-      continue;
+  // Pull-in required operands.
+  for (auto &Op : Inst->operands())
+    ensureScalarReload(Op.get(), Inst->getParent());
+}
 
-    // No need to translate these scalar dependences into polyhedral form,
-    // because synthesizable scalars can be generated by the code generator.
-    if (canSynthesize(UI, LI, SE, R))
-      continue;
+void ScopInfo::buildEscapingDependences(Instruction *Inst) {
+  Region *R = &scop->getRegion();
 
-    // Skip PHI nodes in the region as they handle their operands on their own.
-    if (isa<PHINode>(UI))
+  // Check for uses of this instruction outside the scop. Because we do not
+  // iterate over out-of-scop instructions and therefore cannot "ensure" the
+  // existence of a write for them, we must detect such uses here.
+  for (Use &U : Inst->uses()) {
+    Instruction *UI = dyn_cast<Instruction>(U.getUser());
+    if (!UI)
       continue;
 
-    // Now U is used in another statement.
-    AnyCrossStmtUse = true;
-
-    // Do not build a read access that is not in the current SCoP
-    // Use the def instruction as base address of the MemoryAccess, so that it
-    // will become the name of the scalar access in the polyhedral form.
-    addScalarReadAccess(Inst, UI);
-  }
-
-  if (ModelReadOnlyScalars && !isa<PHINode>(Inst)) {
-    for (Value *Op : Inst->operands()) {
-      if (canSynthesize(Op, LI, SE, R))
-        continue;
+    BasicBlock *UseParent = getUseBlock(U);
+    BasicBlock *UserParent = UI->getParent();
 
-      if (Instruction *OpInst = dyn_cast<Instruction>(Op))
-        if (R->contains(OpInst))
-          continue;
-
-      if (isa<Constant>(Op))
-        continue;
-
-      addScalarReadAccess(Op, Inst);
+    // The use escapes if it lies outside the SCoP, or if it is an exit-block PHI while the region has a single exiting block (such PHIs are modeled as escaping scalars, not as PHI accesses).
+    if (!R->contains(UseParent) ||
+        (isa<PHINode>(UI) && UserParent == R->getExit() &&
+         R->getExitingBlock())) {
+      // At least one escaping use found.
+      ensureScalarStore(Inst);
+      break;
     }
   }
-
-  return AnyCrossStmtUse;
 }
 
 extern MapInsnToMemAcc InsnToMemAcc;
@@ -3851,10 +3802,9 @@
   // The set of loads that are required to be invariant.
   auto &ScopRIL = *SD->getRequiredInvariantLoads(&R);
 
-  for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) {
-    Instruction *Inst = &*I;
+  for (Instruction &Inst : BB) {
 
-    PHINode *PHI = dyn_cast<PHINode>(Inst);
+    PHINode *PHI = dyn_cast<PHINode>(&Inst);
     if (PHI)
       buildPHIAccesses(PHI, R, NonAffineSubRegion, IsExitBlock);
 
@@ -3867,32 +3817,28 @@
     //       there might be other invariant accesses that will be hoisted and
     //       that would allow to make a non-affine access affine.
     if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
-      buildMemoryAccess(Inst, L, &R, BoxedLoops, ScopRIL);
+      buildMemoryAccess(&Inst, L, &R, BoxedLoops, ScopRIL);
 
-    if (isIgnoredIntrinsic(Inst))
+    if (isIgnoredIntrinsic(&Inst))
       continue;
 
     // Do not build scalar dependences for required invariant loads as we will
     // hoist them later on anyway or drop the SCoP if we cannot.
-    if (ScopRIL.count(dyn_cast<LoadInst>(Inst)))
+    if (ScopRIL.count(dyn_cast<LoadInst>(&Inst)))
       continue;
 
-    if (buildScalarDependences(Inst, &R, NonAffineSubRegion)) {
-      if (!isa<StoreInst>(Inst))
-        addScalarWriteAccess(Inst);
-    }
+    if (!PHI)
+      buildScalarDependences(&Inst);
+    if (!IsExitBlock)
+      buildEscapingDependences(&Inst);
   }
 }
 
-MemoryAccess *ScopInfo::addMemoryAccess(BasicBlock *BB, Instruction *Inst,
-                                        MemoryAccess::AccessType Type,
-                                        Value *BaseAddress, unsigned ElemBytes,
-                                        bool Affine, Value *AccessValue,
-                                        ArrayRef<const SCEV *> Subscripts,
-                                        ArrayRef<const SCEV *> Sizes,
-                                        MemoryAccess::AccessOrigin Origin) {
-  ScopStmt *Stmt = scop->getStmtForBasicBlock(BB);
-
+MemoryAccess *ScopInfo::addMemoryAccess(
+    ScopStmt *Stmt, BasicBlock *BB, Instruction *Inst,
+    MemoryAccess::AccessType Type, Value *BaseAddress, unsigned ElemBytes,
+    bool Affine, Value *AccessValue, ArrayRef<const SCEV *> Subscripts,
+    ArrayRef<const SCEV *> Sizes, MemoryAccess::AccessOrigin Origin) {
   // Do not create a memory access for anything not in the SCoP. It would be
   // ignored anyway.
   if (!Stmt)
@@ -3902,10 +3848,15 @@
   Value *BaseAddr = BaseAddress;
   std::string BaseName = getIslCompatibleName("MemRef_", BaseAddr, "");
 
-  bool isApproximated =
-      Stmt->isRegionStmt() && (Stmt->getRegion()->getEntry() != BB);
-  if (isApproximated && Type == MemoryAccess::MUST_WRITE)
-    Type = MemoryAccess::MAY_WRITE;
+  // The execution of an explicit store is not guaranteed if not in the entry
+  // block of a subregion. By contrast, implicit writes must occur in
+  // well-formed IR code.
+  if (Origin == MemoryAccess::EXPLICIT) {
+    bool isApproximated =
+        Stmt->isRegionStmt() && (Stmt->getRegion()->getEntry() != BB);
+    if (isApproximated && Type == MemoryAccess::MUST_WRITE)
+      Type = MemoryAccess::MAY_WRITE;
+  }
 
   AccList.emplace_back(Stmt, Inst, Type, BaseAddress, ElemBytes, Affine,
                        Subscripts, Sizes, AccessValue, Origin, BaseName);
@@ -3918,52 +3869,123 @@
     ArrayRef<const SCEV *> Sizes, Value *AccessValue) {
   assert(isa<LoadInst>(MemAccInst) || isa<StoreInst>(MemAccInst));
   assert(isa<LoadInst>(MemAccInst) == (Type == MemoryAccess::READ));
+  ScopStmt *Stmt = scop->getStmtForBasicBlock(MemAccInst->getParent());
   MemoryAccess *Acc = addMemoryAccess(
-      MemAccInst->getParent(), MemAccInst, Type, BaseAddress, ElemBytes,
+      Stmt, MemAccInst->getParent(), MemAccInst, Type, BaseAddress, ElemBytes,
       IsAffine, AccessValue, Subscripts, Sizes, MemoryAccess::EXPLICIT);
   if (Acc)
     Acc->getStatement()->addExplicitAccess(Acc);
 }
-void ScopInfo::addScalarWriteAccess(Instruction *Value) {
+
+void ScopInfo::ensureScalarStore(Instruction *Value) {
+  ScopStmt *Stmt = scop->getStmtForBasicBlock(Value->getParent());
+
+  // Value not defined within the SCoP.
+  if (!Stmt)
+    return;
+
+  // Do not process further if the value is already written.
+  if (Stmt->lookupScalarWriteOf(Value))
+    return;
+
   MemoryAccess *Acc =
-      addMemoryAccess(Value->getParent(), Value, MemoryAccess::MUST_WRITE,
+      addMemoryAccess(Stmt, Value->getParent(), Value, MemoryAccess::MUST_WRITE,
                       Value, 1, true, Value, ArrayRef<const SCEV *>(),
                       ArrayRef<const SCEV *>(), MemoryAccess::SCALAR);
   if (Acc)
-    Acc->getStatement()->addTrailingWrite(Acc);
+    Stmt->addScalarWrite(Acc);
 }
-void ScopInfo::addScalarReadAccess(Value *Value, Instruction *User) {
-  assert(!isa<PHINode>(User));
+
+void ScopInfo::ensureScalarReload(Value *Value, BasicBlock *UserBB) {
+
+  // There cannot be an "access" for constants.
+  if (isa<Constant>(Value) || isa<BasicBlock>(Value))
+    return;
+
+  // If the instruction can be synthesized and the user is in the region we do
+  // not need to add scalar dependences.
+  Region &ScopRegion = scop->getRegion();
+  if (canSynthesize(Value, LI, SE, &ScopRegion))
+    return;
+
+  // Determine the ScopStmt containing the value's definition and use. There is
+  // no definition ScopStmt if the value is a function argument, a global
+  // value, or defined outside the scop.
+  Instruction *ValueInst = dyn_cast<Instruction>(Value);
+  ScopStmt *ValueStmt =
+      ValueInst ? scop->getStmtForBasicBlock(ValueInst->getParent()) : nullptr;
+  ScopStmt *UserStmt = scop->getStmtForBasicBlock(UserBB);
+
+  // We do not model uses outside the scop.
+  if (!UserStmt)
+    return;
+
+  // Add MemoryAccess for invariant values only if requested.
+  if (!ModelReadOnlyScalars && !ValueStmt)
+    return;
+
+  // Ignore use-def chains within the same ScopStmt.
+  if (ValueStmt == UserStmt)
+    return;
+
+  // Do not create another MemoryAccess for reloading the value if one already
+  // exists.
+  if (UserStmt->lookupScalarReadOf(Value))
+    return;
+
   MemoryAccess *Acc = addMemoryAccess(
-      User->getParent(), User, MemoryAccess::READ, Value, 1, true, Value,
+      UserStmt, UserBB, nullptr, MemoryAccess::READ, Value, 1, true, Value,
       ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(), MemoryAccess::SCALAR);
-  if (Acc)
-    Acc->getStatement()->addLeadingLoad(Acc);
+  if (!Acc)
+    return;
+
+  UserStmt->addScalarRead(Acc);
+
+  // If we load the value, we should also ensure that it is written.
+  if (ValueStmt)
+    ensureScalarStore(ValueInst);
 }
-void ScopInfo::addScalarReadAccess(Value *Value, PHINode *User,
-                                   BasicBlock *UserBB) {
+
+void ScopInfo::ensurePHIWriteAccess(PHINode *PHI, BasicBlock *IncomingBlock,
+                                    Value *IncomingValue, bool IsExitBlock) {
+
+  ScopStmt *PHIStmt = scop->getStmtForBasicBlock(PHI->getParent());
+  ScopStmt *IncomingStmt = scop->getStmtForBasicBlock(IncomingBlock);
+
+  assert(IsExitBlock == !PHIStmt);
+  if (!IncomingStmt)
+    return;
+
+  // Ensure that the incoming value is available in the incoming block.
+  // This must be done before the check for multiple PHI writes because multiple
+  // exiting edges from a subregion can each be the effective written value of the
+  // subregion. As such, all of them must be made available in the subregion
+  // statement.
+  ensureScalarReload(IncomingValue, IncomingBlock);
+
+  // Do not add more than one MemoryAccess per PHINode and ScopStmt.
+  if (MemoryAccess *Acc = IncomingStmt->lookupPHIWriteOf(PHI)) {
+    assert(Acc->getAccessInstruction() == PHI);
+    Acc->addIncoming(IncomingBlock, IncomingValue);
+    return;
+  }
+
   MemoryAccess *Acc = addMemoryAccess(
-      UserBB, User, MemoryAccess::READ, Value, 1, true, Value,
-      ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(), MemoryAccess::SCALAR);
-  if (Acc)
-    Acc->getStatement()->addLeadingLoad(Acc);
-}
-void ScopInfo::addPHIWriteAccess(PHINode *PHI, BasicBlock *IncomingBlock,
-                                 Value *IncomingValue, bool IsExitBlock) {
-  MemoryAccess *Acc =
-      addMemoryAccess(IncomingBlock, IncomingBlock->getTerminator(),
-                      MemoryAccess::MUST_WRITE, PHI, 1, true, IncomingValue,
-                      ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
-                      IsExitBlock ? MemoryAccess::EXIT_PHI : MemoryAccess::PHI);
-  if (Acc)
-    Acc->getStatement()->addTrailingWrite(Acc);
+      IncomingStmt, PHI->getParent(), PHI, MemoryAccess::MUST_WRITE, PHI, 1,
+      true, PHI, ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
+      IsExitBlock ? MemoryAccess::EXIT_PHI : MemoryAccess::PHI);
+  assert(Acc);
+  Acc->addIncoming(IncomingBlock, IncomingValue);
+  IncomingStmt->addPHIWrite(Acc);
 }
+
 void ScopInfo::addPHIReadAccess(PHINode *PHI) {
+  ScopStmt *Stmt = scop->getStmtForBasicBlock(PHI->getParent());
   MemoryAccess *Acc = addMemoryAccess(
-      PHI->getParent(), PHI, MemoryAccess::READ, PHI, 1, true, PHI,
+      Stmt, PHI->getParent(), PHI, MemoryAccess::READ, PHI, 1, true, PHI,
       ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(), MemoryAccess::PHI);
   if (Acc)
-    Acc->getStatement()->addLeadingLoad(Acc);
+    Acc->getStatement()->addPHIRead(Acc);
 }
 
 void ScopInfo::buildScop(Region &R, DominatorTree &DT, AssumptionCache &AC) {
Index: lib/CodeGen/BlockGenerators.cpp
===================================================================
--- lib/CodeGen/BlockGenerators.cpp
+++ lib/CodeGen/BlockGenerators.cpp
@@ -362,14 +362,14 @@
     return;
 
   EscapeUserVectorTy EscapeUsers;
-  for (User *U : Inst->users()) {
+  for (Use &U : Inst->uses()) {
 
     // Non-instruction user will never escape.
-    Instruction *UI = dyn_cast<Instruction>(U);
+    Instruction *UI = dyn_cast<Instruction>(U.getUser());
     if (!UI)
       continue;
 
-    if (R.contains(UI))
+    if (R.contains(getUseBlock(U)))
       continue;
 
     EscapeUsers.push_back(UI);
@@ -417,6 +417,9 @@
   if (!ScalarValueInst)
     return ScalarValue;
 
+  if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst))
+    return /* Case (3a) */ ScalarValueCopy;
+
   if (!R.contains(ScalarValueInst)) {
     if (Value *ScalarValueCopy = GlobalMap.lookup(ScalarValueInst))
       return /* Case (3a) */ ScalarValueCopy;
@@ -424,9 +427,6 @@
       return /* Case 2 */ ScalarValue;
   }
 
-  if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst))
-    return /* Case (3a) */ ScalarValueCopy;
-
   if ((Stmt.isBlockStmt() &&
        Stmt.getBasicBlock() == ScalarValueInst->getParent()) ||
       (Stmt.isRegionStmt() && Stmt.getRegion()->contains(ScalarValueInst))) {
@@ -455,8 +455,10 @@
   for (MemoryAccess *MA : Stmt.getTrailingWrites()) {
     assert(MA->isImplicit());
     assert(MA->isWrite());
+    assert(!MA->isAnyPHI() || MA->getIncoming().size() == 1);
 
-    Value *Val = MA->getAccessValue();
+    Value *Val =
+        MA->isAnyPHI() ? MA->getIncoming()[0].second : MA->getAccessValue();
     auto *Address = getOrCreateAlloca(*MA);
 
     Val = getNewScalarValue(Val, R, Stmt, LTS, BBMap);
@@ -1181,44 +1183,65 @@
 
 void RegionGenerator::generateScalarStores(ScopStmt &Stmt, LoopToScevMapT &LTS,
                                            ValueMapT &BBMap) {
+  assert(Stmt.isRegionStmt());
   const Region &R = Stmt.getParent()->getRegion();
+  auto &SubR = *Stmt.getRegion();
 
-  assert(Stmt.getRegion() &&
-         "Block statements need to use the generateScalarStores() "
-         "function in the BlockGenerator");
-
-  for (MemoryAccess *MA : Stmt) {
-    if (MA->isExplicit() || MA->isRead())
-      continue;
-
-    Instruction *ScalarInst = MA->getAccessInstruction();
-    Value *Val = MA->getAccessValue();
-
-    // In case we add the store into an exiting block, we need to restore the
-    // position for stores in the exit node.
-    BasicBlock *SavedInsertBB = Builder.GetInsertBlock();
-    auto SavedInsertionPoint = Builder.GetInsertPoint();
-    ValueMapT *LocalBBMap = &BBMap;
-
-    // Implicit writes induced by PHIs must be written in the incoming blocks.
-    if (MA->isPHI() || MA->isExitPHI()) {
-      BasicBlock *ExitingBB = ScalarInst->getParent();
-      BasicBlock *ExitingBBCopy = BlockMap[ExitingBB];
-      Builder.SetInsertPoint(ExitingBBCopy->getTerminator());
+  for (MemoryAccess *MA : Stmt.getTrailingWrites()) {
+    assert(MA->isImplicit());
+    assert(MA->isWrite());
 
-      // For the incoming blocks, use the block's BBMap instead of the one for
-      // the entire region.
-      LocalBBMap = &RegionMaps[ExitingBBCopy];
+    Value *NewVal;
+    if (MA->isAnyPHI()) {
+      auto Incoming = MA->getIncoming();
+      assert(!Incoming.empty());
+
+      if (Incoming.size() == 1) {
+        Value *OldVal = Incoming[0].second;
+        NewVal = getNewScalarValue(OldVal, R, Stmt, LTS, BBMap);
+      } else {
+        // Create a PHI of all possible outgoing values of this subregion.
+
+        BasicBlock *SavedInsertBB = Builder.GetInsertBlock();
+        auto SavedIP = Builder.GetInsertPoint();
+        PHINode *OrigPHI = cast<PHINode>(MA->getAccessInstruction());
+        BasicBlock *NewSubregionExit = Builder.GetInsertBlock();
+
+        // This can happen if the subregion is simplified after the ScopStmts
+        // have been created; simplification happens as part of CodeGeneration.
+        if (OrigPHI->getParent() != SubR.getExit()) {
+          BasicBlock *FormerExit = SubR.getExitingBlock();
+          if (FormerExit)
+            NewSubregionExit = BlockMap.lookup(FormerExit);
+        }
+
+        PHINode *NewPHI = PHINode::Create(OrigPHI->getType(), Incoming.size(),
+                                          "polly." + OrigPHI->getName(),
+                                          NewSubregionExit->getFirstNonPHI());
+
+        for (auto &Pair : Incoming) {
+          BasicBlock *OrigIncomingBlock = Pair.first;
+          BasicBlock *NewIncomingBlock = BlockMap.lookup(OrigIncomingBlock);
+          Builder.SetInsertPoint(NewIncomingBlock->getTerminator());
+          assert(RegionMaps.count(NewIncomingBlock));
+          ValueMapT *LocalBBMap = &RegionMaps[NewIncomingBlock];
+
+          Value *OrigIncomingValue = Pair.second;
+          Value *NewIncomingValue =
+              getNewScalarValue(OrigIncomingValue, R, Stmt, LTS, *LocalBBMap);
+          NewPHI->addIncoming(NewIncomingValue, NewIncomingBlock);
+        }
+
+        Builder.SetInsertPoint(SavedInsertBB, SavedIP);
+        NewVal = NewPHI;
+      }
+    } else {
+      Value *OldVal = MA->getAccessValue();
+      NewVal = getNewScalarValue(OldVal, R, Stmt, LTS, BBMap);
     }
 
-    auto Address = getOrCreateAlloca(*MA);
-
-    Val = getNewScalarValue(Val, R, Stmt, LTS, *LocalBBMap);
-    Builder.CreateStore(Val, Address);
-
-    // Restore the insertion point if necessary.
-    if (MA->isPHI() || MA->isExitPHI())
-      Builder.SetInsertPoint(SavedInsertBB, SavedInsertionPoint);
+    Value *Address = getOrCreateAlloca(*MA);
+    Builder.CreateStore(NewVal, Address);
   }
 }
 
Index: lib/Support/ScopHelper.cpp
===================================================================
--- lib/Support/ScopHelper.cpp
+++ lib/Support/ScopHelper.cpp
@@ -453,3 +453,14 @@
 
   return false;
 }
+
+llvm::BasicBlock *polly::getUseBlock(llvm::Use &U) {
+  Instruction *UI = dyn_cast<Instruction>(U.getUser());
+  if (!UI)
+    return nullptr;
+
+  if (PHINode *PHI = dyn_cast<PHINode>(UI))
+    return PHI->getIncomingBlock(U);
+
+  return UI->getParent();
+}
Index: test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll
===================================================================
--- test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll
+++ test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll
@@ -10,13 +10,14 @@
 ; CHECK:   br i1 %p_tmp8, label %polly.stmt.bb9, label %polly.stmt.bb10
 
 ; CHECK: polly.stmt.bb9:                                   ; preds = %polly.stmt.bb3
-; CHECK:   store double 1.000000e+00, double* %tmp12.phiops
 ; CHECK:   br label %polly.stmt.bb11.exit
 
 ; CHECK: polly.stmt.bb10:                                  ; preds = %polly.stmt.bb3
-; CHECK:   store double 2.000000e+00, double* %tmp12.phiops
 ; CHECK:   br label %polly.stmt.bb11.exit
 
+; CHECK: polly.stmt.bb11.exit:                             ; preds = %polly.stmt.bb10, %polly.stmt.bb9
+; CHECK:   %polly.tmp12 = phi double [ 1.000000e+00, %polly.stmt.bb9 ], [ 2.000000e+00, %polly.stmt.bb10 ]
+; CHECK:   store double %polly.tmp12, double* %tmp12.phiops
 
 define void @hoge(i32 %arg, [1024 x double]* %arg1) {
 bb:
Index: test/Isl/CodeGen/non-affine-phi-node-expansion-3.ll
===================================================================
--- test/Isl/CodeGen/non-affine-phi-node-expansion-3.ll
+++ test/Isl/CodeGen/non-affine-phi-node-expansion-3.ll
@@ -16,23 +16,23 @@
 ; CHECK-NEXT: %p_val0 = fadd float 1.000000e+00, 2.000000e+00
 ; CHECK-NEXT: %p_val1 = fadd float 1.000000e+00, 2.000000e+00
 ; CHECK-NEXT: %p_val2 = fadd float 1.000000e+00, 2.000000e+00
-; CHECK-NEXT: store float %p_val0, float* %merge.phiops
 ; CHECK-NEXT: br i1
 
 branch1:
   br i1 %cond1, label %branch2, label %backedge
 
 ; CHECK-LABEL: polly.stmt.branch1:
-; CHECK-NEXT:    store float %p_val1, float* %merge.phiops
 ; CHECK-NEXT: br i1
 
 branch2:
   br label %backedge
 
 ; CHECK-LABEL: polly.stmt.branch2:
-; CHECK-NEXT:    store float %p_val2, float* %merge.phiops
 ; CHECK-NEXT:    br label
 
+; CHECK-LABEL: polly.stmt.backedge.exit:
+; CHECK:         %polly.merge = phi float [ %p_val0, %polly.stmt.loop ], [ %p_val1, %polly.stmt.branch1 ], [ %p_val2, %polly.stmt.branch2 ]
+
 backedge:
   %merge = phi float [%val0, %loop], [%val1, %branch1], [%val2, %branch2]
   %indvar.next = add i64 %indvar, 1
Index: test/Isl/CodeGen/non-affine-phi-node-expansion-4.ll
===================================================================
--- test/Isl/CodeGen/non-affine-phi-node-expansion-4.ll
+++ test/Isl/CodeGen/non-affine-phi-node-expansion-4.ll
@@ -14,7 +14,6 @@
 ; CHECK-LABEL: polly.stmt.loop:
 ; CHECK-NEXT:    %p_val0 = fadd float 1.000000e+00, 2.000000e+00
 ; CHECK-NEXT:    %p_val1 = fadd float 1.000000e+00, 2.000000e+00
-; CHECK-NEXT:    store float %p_val0, float* %merge.phiops
 ; CHECK-NEXT:    br i1
 
 ; The interesting instruction here is %val2, which does not dominate the exit of
@@ -27,16 +26,17 @@
 
 ; CHECK-LABEL: polly.stmt.branch1:
 ; CHECK-NEXT:    %p_val2 = fadd float 1.000000e+00, 2.000000e+00
-; CHECK-NEXT:    store float %p_val1, float* %merge.phiops
 ; CHECK-NEXT:    br i1
 
 branch2:
   br label %backedge
 
 ; CHECK-LABEL: polly.stmt.branch2:
-; CHECK-NEXT:    store float %p_val2, float* %merge.phiops
 ; CHECK-NEXT:    br label
 
+; CHECK-LABEL: polly.stmt.backedge.exit:
+; CHECK:         %polly.merge = phi float [ %p_val0, %polly.stmt.loop ], [ %p_val1, %polly.stmt.branch1 ], [ %p_val2, %polly.stmt.branch2 ]
+
 backedge:
   %merge = phi float [%val0, %loop], [%val1, %branch1], [%val2, %branch2]
   %indvar.next = add i64 %indvar, 1
Index: test/Isl/CodeGen/non-affine-region-exit-phi-incoming-synthesize.ll
===================================================================
--- test/Isl/CodeGen/non-affine-region-exit-phi-incoming-synthesize.ll
+++ test/Isl/CodeGen/non-affine-region-exit-phi-incoming-synthesize.ll
@@ -8,13 +8,15 @@
 ;
 ; CHECK-LABEL: polly.stmt.subregion_entry:
 ; CHECK:         %[[R0:[0-9]*]] = add i32 %n, -2
-; CHECK:         store i32 %[[R0]], i32* %retval.s2a
 ;
 ; CHECK-LABEL: polly.stmt.subregion_if:
 ; CHECK:         %[[R1:[0-9]*]] = add i32 %n, -2
-; CHECK:         store i32 %[[R1]], i32* %retval.s2a
+;
+; CHECK-LABEL: polly.stmt.subregion_exit.region_exiting:
+; CHECK:         %polly.retval = phi i32 [ %[[R1]], %polly.stmt.subregion_if ], [ %[[R0]], %polly.stmt.subregion_entry ]
 ;
 ; CHECK-LABEL: polly.stmt.polly.merge_new_and_old.exit:
+; CHECK:         store i32 %polly.retval, i32* %retval.s2a
 ; CHECK:         load i32, i32* %retval.s2a
 
 define i32 @func(i32 %n){
Index: test/Isl/CodeGen/out-of-scop-phi-node-use.ll
===================================================================
--- test/Isl/CodeGen/out-of-scop-phi-node-use.ll
+++ test/Isl/CodeGen/out-of-scop-phi-node-use.ll
@@ -9,8 +9,13 @@
 ; CHECK-LABEL: for.cond.981:
 ; CHECK-NEXT:  %_s.sroa.343.0.ph5161118 = phi i32 [ undef, %for.cond ], [ %_s.sroa.343.0.ph5161118.ph.merge, %polly.merge_new_and_old ]
 
+; CHECK-LABEL: polly.stmt.for.cond.981.region_exiting:
+; CHECK-NEXT:    %polly._s.sroa.343.0.ph5161118 = phi i32 [ undef, %polly.stmt.if.then.960 ], [ undef, %polly.stmt.if.else.969 ]
+; CHECK-NEXT:    br label %polly.stmt.polly.merge_new_and_old.exit
+
 ; CHECK-LABEL: polly.stmt.polly.merge_new_and_old.exit:
-; CHECK-NEXT: %_s.sroa.343.0.ph5161118.ph.final_reload = load i32, i32* %_s.sroa.343.0.ph5161118.s2a
+; CHECK-NEXT:    store i32 %polly._s.sroa.343.0.ph5161118, i32* %_s.sroa.343.0.ph5161118.s2a
+; CHECK-NEXT:    %_s.sroa.343.0.ph5161118.ph.final_reload = load i32, i32* %_s.sroa.343.0.ph5161118.s2a
 
 ; Function Attrs: nounwind uwtable
 define void @lzmaDecode() #0 {
Index: test/Isl/CodeGen/phi-defined-before-scop.ll
===================================================================
--- test/Isl/CodeGen/phi-defined-before-scop.ll
+++ test/Isl/CodeGen/phi-defined-before-scop.ll
@@ -4,7 +4,6 @@
 ; CHECK-NEXT: %tmp7.ph.merge = phi %struct.wibble* [ %tmp7.ph.final_reload, %polly.stmt.bb5 ], [ %tmp7.ph, %bb6.region_exiting ]
 
 ; CHECK-LABEL: polly.stmt.bb3:
-; CHECK-NEXT: %tmp2.s2a.reload = load %struct.wibble*, %struct.wibble** %tmp2.s2a
 ; CHECK-NEXT: store %struct.wibble* %tmp2, %struct.wibble** %tmp7.s2a
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
Index: test/Isl/CodeGen/pr25241.ll
===================================================================
--- test/Isl/CodeGen/pr25241.ll
+++ test/Isl/CodeGen/pr25241.ll
@@ -6,10 +6,10 @@
 
 ; CHECK-LABEL: polly.stmt.if.then.862:
 ; CHECK:         %[[R1:[0-9]+]] = add i32 %tmp, 1
-; CHECK:         store i32 %0, i32* %curr.3.s2a
 ; CHECK:         br label
 
 ; CHECK-LABEL: polly.stmt.polly.merge_new_and_old.exit:
+; CHECK:         store i32 %polly.curr.3, i32* %curr.3.s2a
 ; CHECK:         %curr.3.ph.final_reload = load i32, i32* %curr.3.s2a
 ; CHECK:         br label
 
Index: test/Isl/CodeGen/uninitialized_scalar_memory.ll
===================================================================
--- test/Isl/CodeGen/uninitialized_scalar_memory.ll
+++ test/Isl/CodeGen/uninitialized_scalar_memory.ll
@@ -5,7 +5,6 @@
 ;
 ; CHECK:      polly.start:
 ; CHECK-NEXT:   store float %ebig.0, float* %ebig.0.s2a
-; CHECK-NEXT:   store i32 %iebig.0, i32* %iebig.0.s2a
 ; CHECK-NEXT:   br label %polly.stmt.if.end.entry
 ;
 ;    int g(void);
Index: test/ScopInfo/NonAffine/non_affine_loop_used_later.ll
===================================================================
--- test/ScopInfo/NonAffine/non_affine_loop_used_later.ll
+++ test/ScopInfo/NonAffine/non_affine_loop_used_later.ll
@@ -37,14 +37,10 @@
 ; CHECK:                [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_A[i0] };
 ; CHECK:            MayWriteAccess := [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:                [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_A[i0] };
-; CHECK:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK:                [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_smax[] };
-; CHECK:            MayWriteAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:                [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_j_2__phi[] };
 ; CHECK:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:                [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_j_0[] };
-; CHECK:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
-; CHECK:                [N] -> { Stmt_bb4__TO__bb18[i0] -> MemRef_j_2__phi[] };
 ; CHECK:      Stmt_bb18
 ; CHECK:            Schedule :=
 ; CHECK:                [N] -> { Stmt_bb18[i0] -> [i0, 2] };
Index: test/ScopInfo/intra-non-affine-stmt-phi-node.ll
===================================================================
--- test/ScopInfo/intra-non-affine-stmt-phi-node.ll
+++ test/ScopInfo/intra-non-affine-stmt-phi-node.ll
@@ -10,10 +10,6 @@
 ; CHECK-NEXT:             { Stmt_loop__TO__backedge[i0] -> [i0, 0] };
 ; CHECK-NEXT:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK-NEXT:             { Stmt_loop__TO__backedge[i0] -> MemRef_merge__phi[] };
-; CHECK-NEXT:         MayWriteAccess :=   [Reduction Type: NONE] [Scalar: 1]
-; CHECK-NEXT:             { Stmt_loop__TO__backedge[i0] -> MemRef_merge__phi[] };
-; CHECK-NEXT:         MayWriteAccess :=   [Reduction Type: NONE] [Scalar: 1]
-; CHECK-NEXT:             { Stmt_loop__TO__backedge[i0] -> MemRef_merge__phi[] };
 ; CHECK-NEXT:   Stmt_backedge
 ; CHECK-NEXT:         Domain :=
 ; CHECK-NEXT:             { Stmt_backedge[i0] : i0 <= 100 and i0 >= 0 };
Index: test/ScopInfo/invariant-loads-leave-read-only-statements.ll
===================================================================
--- test/ScopInfo/invariant-loads-leave-read-only-statements.ll
+++ test/ScopInfo/invariant-loads-leave-read-only-statements.ll
@@ -9,24 +9,36 @@
 ; CHECK-NEXT:            Schedule :=
 ; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_top_split[] -> [0, 0, 0, 0] };
 ; CHECK-NEXT:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
-; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_top_split[] -> MemRef_25[] };
-; CHECK-NEXT:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_top_split[] -> MemRef_26[] };
+; CHECK-NEXT:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
+; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_top_split[] -> MemRef_25[] };
 ; CHECK-NEXT:      Stmt_L_4
 ; CHECK-NEXT:            Domain :=
 ; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] : i0 >= 0 and i0 <= -1 + p_0 and i1 >= 0 and i1 <= -1 + p_0 and i2 >= 0 and i2 <= -1 + p_0 };
 ; CHECK-NEXT:            Schedule :=
 ; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> [1, i0, i1, i2] };
 ; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_25[] };
+; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_22[] };
+; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_19[] };
 ; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 0]
 ; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_19[i1, i0] };
+; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_8[] };
+; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_5[] };
 ; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 0]
 ; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_5[i2, i0] };
+; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_15[] };
+; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_12[] };
 ; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 0]
 ; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_12[i2, i1] };
 ; CHECK-NEXT:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
 ; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_19[i1, i0] };
+; CHECK-NEXT:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK-NEXT:                [p_0, p_1, p_2] -> { Stmt_L_4[i0, i1, i2] -> MemRef_25[] };
 ; CHECK-NEXT:    }
 ;
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
Index: test/ScopInfo/many-scalar-dependences.ll
===================================================================
--- test/ScopInfo/many-scalar-dependences.ll
+++ test/ScopInfo/many-scalar-dependences.ll
@@ -91,12 +91,12 @@
 ; CHECK:           { Stmt_bb12[i0, i1, i2] -> [i0, 2, i1, 2, i2, 3] };
 ; CHECK:       ReadAccess :=       [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:           { Stmt_bb12[i0, i1, i2] -> MemRef_x_3__phi[] };
-; CHECK:       MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
-; CHECK:           { Stmt_bb12[i0, i1, i2] -> MemRef_x_3[] };
 ; CHECK:       ReadAccess :=       [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:           { Stmt_bb12[i0, i1, i2] -> MemRef_a[i0, i1] };
 ; CHECK:       MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:           { Stmt_bb12[i0, i1, i2] -> MemRef_a[i0, i1] };
+; CHECK:       MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
+; CHECK:           { Stmt_bb12[i0, i1, i2] -> MemRef_x_3[] };
 ; CHECK: Stmt_bb16
 ; CHECK:       Domain :=
 ; CHECK:           { Stmt_bb16[i0, i1, i2] : i0 <= 99 and i0 >= 0 and i1 <= 99 and i1 >= 0 and i2 <= 99 and i2 >= 0 };
Index: test/ScopInfo/non_affine_region_2.ll
===================================================================
--- test/ScopInfo/non_affine_region_2.ll
+++ test/ScopInfo/non_affine_region_2.ll
@@ -35,10 +35,6 @@
 ; CHECK-NEXT:        { Stmt_bb3__TO__bb18[i0] -> MemRef_A[i0] };
 ; CHECK-NOT:         { Stmt_bb3__TO__bb18[i0] -> MemRef_x_0[] };
 ; CHECK-NOT:         { Stmt_bb3__TO__bb18[i0] -> MemRef_x_1[] };
-; CHECK:         MayWriteAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK-NEXT:        { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] };
-; CHECK-NOT:         { Stmt_bb3__TO__bb18[i0] -> MemRef_x_0[] };
-; CHECK-NOT:         { Stmt_bb3__TO__bb18[i0] -> MemRef_x_1[] };
 ; CHECK:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK-NEXT:        { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] };
 ; CHECK-NOT:         { Stmt_bb3__TO__bb18[i0] -> MemRef_x_0[] };
Index: test/ScopInfo/non_affine_region_3.ll
===================================================================
--- test/ScopInfo/non_affine_region_3.ll
+++ test/ScopInfo/non_affine_region_3.ll
@@ -31,12 +31,6 @@
 ; CHECK:             { Stmt_bb3__TO__bb18[i0] -> [i0, 0] };
 ; CHECK:         ReadAccess := [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:             { Stmt_bb3__TO__bb18[i0] -> MemRef_A[i0] };
-; CHECK:         MayWriteAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK:             { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] };
-; CHECK:         MayWriteAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK:             { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] };
-; CHECK:         MayWriteAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK:             { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] };
 ; CHECK:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:             { Stmt_bb3__TO__bb18[i0] -> MemRef_x_2__phi[] };
 ; CHECK:   Stmt_bb18
Index: test/ScopInfo/non_affine_region_4.ll
===================================================================
--- test/ScopInfo/non_affine_region_4.ll
+++ test/ScopInfo/non_affine_region_4.ll
@@ -16,14 +16,14 @@
 ;
 ; CHECK: Arrays {
 ; CHECK:   i32 MemRef_A[*];
-; CHECK:   i32 MemRef_x; [BasePtrOrigin: MemRef_A]
 ; CHECK:   i32 MemRef_y__phi;
+; CHECK:   i32 MemRef_x; [BasePtrOrigin: MemRef_A]
 ; CHECK: }
 ;
 ; CHECK: Arrays (Bounds as pw_affs) {
 ; CHECK:   i32 MemRef_A[*];
-; CHECK:   i32 MemRef_x; [BasePtrOrigin: MemRef_A]
 ; CHECK:   i32 MemRef_y__phi;
+; CHECK:   i32 MemRef_x; [BasePtrOrigin: MemRef_A]
 ; CHECK: }
 ;
 ; CHECK:      Stmt_bb2__TO__bb7
@@ -38,11 +38,9 @@
 ; CHECK:            ReadAccess := [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:                { Stmt_bb2__TO__bb7[i0] -> MemRef_A[i0] };
 ; CHECK:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
-; CHECK:                { Stmt_bb2__TO__bb7[i0] -> MemRef_x[] };
-; CHECK:            MayWriteAccess := [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:                { Stmt_bb2__TO__bb7[i0] -> MemRef_y__phi[] };
 ; CHECK:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
-; CHECK:                { Stmt_bb2__TO__bb7[i0] -> MemRef_y__phi[] };
+; CHECK:                { Stmt_bb2__TO__bb7[i0] -> MemRef_x[] };
 ; CHECK:      Stmt_bb7
 ; CHECK:            Domain :=
 ; CHECK:                { Stmt_bb7[i0] :
@@ -53,9 +51,9 @@
 ; CHECK:            Schedule :=
 ; CHECK:                { Stmt_bb7[i0] -> [i0, 1] };
 ; CHECK:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK:                { Stmt_bb7[i0] -> MemRef_x[] };
-; CHECK:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:                { Stmt_bb7[i0] -> MemRef_y__phi[] };
+; CHECK:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK:                { Stmt_bb7[i0] -> MemRef_x[] };
 ; CHECK:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:                { Stmt_bb7[i0] -> MemRef_A[i0] };
 ;
Index: test/ScopInfo/phi_condition_modeling_2.ll
===================================================================
--- test/ScopInfo/phi_condition_modeling_2.ll
+++ test/ScopInfo/phi_condition_modeling_2.ll
@@ -32,12 +32,12 @@
 ; CHECK-NOT: Access
 ; CHECK-LABEL:      Stmt_bb8b
 ; CHECK-NOT: Access
-; CHECK:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK:                [N, c] -> { Stmt_bb8b[i0] -> MemRef_tmp_0[] };
-; CHECK-NOT: Access
 ; CHECK:            MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:                [N, c] -> { Stmt_bb8b[i0] -> MemRef_A[i0] };
 ; CHECK-NOT: Access
+; CHECK:            ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK:                [N, c] -> { Stmt_bb8b[i0] -> MemRef_tmp_0[] };
+; CHECK-NOT: Access
 ; CHECK:    }
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
Index: test/ScopInfo/phi_loop_carried_float.ll
===================================================================
--- test/ScopInfo/phi_loop_carried_float.ll
+++ test/ScopInfo/phi_loop_carried_float.ll
@@ -21,10 +21,10 @@
 ; CHECK:                  [N] -> { Stmt_bb4[i0] -> MemRef_tmp_0__phi[] };
 ; CHECK-NOT: Access
 ; CHECK:              ReadAccess := [Reduction Type: NONE]
-; CHECK:                  [N] -> { Stmt_bb4[i0] -> MemRef_tmp_0[] };
+; CHECK:                  [N] -> { Stmt_bb4[i0] -> MemRef_A[i0] };
 ; CHECK-NOT: Access
 ; CHECK:              ReadAccess := [Reduction Type: NONE]
-; CHECK:                  [N] -> { Stmt_bb4[i0] -> MemRef_A[i0] };
+; CHECK:                  [N] -> { Stmt_bb4[i0] -> MemRef_tmp_0[] };
 ; CHECK-NOT: Access
 ; CHECK:      }
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
Index: test/ScopInfo/phi_scalar_simple_1.ll
===================================================================
--- test/ScopInfo/phi_scalar_simple_1.ll
+++ test/ScopInfo/phi_scalar_simple_1.ll
@@ -29,9 +29,6 @@
 ; CHECK:       MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:           [N] -> { Stmt_for_cond[i0] -> MemRef_x_addr_0[] };
 ; CHECK-NOT: Access
-; CHECK:       MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
-; CHECK:           [N] -> { Stmt_for_cond[i0] -> MemRef_x_addr_0[] };
-; CHECK-NOT: Access
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc4 ], [ 1, %entry ]
   %x.addr.0 = phi i32 [ %x, %entry ], [ %x.addr.1.lcssa, %for.inc4 ]
   %cmp = icmp slt i64 %indvars.iv, %tmp
@@ -71,12 +68,12 @@
 ; CHECK:       MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:           [N] -> { Stmt_for_inc[i0, i1] -> MemRef_x_addr_1__phi[] };
 ; CHECK-NOT: Access
-; CHECK:       ReadAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK:           [N] -> { Stmt_for_inc[i0, i1] -> MemRef_x_addr_1[] };
-; CHECK-NOT: Access
 ; CHECK:       ReadAccess := [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:           [N] -> { Stmt_for_inc[i0, i1] -> MemRef_A[1 + i0] };
 ; CHECK-NOT: Access
+; CHECK:       ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK:           [N] -> { Stmt_for_inc[i0, i1] -> MemRef_x_addr_1[] };
+; CHECK-NOT: Access
   %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
   %tmp1 = load i32, i32* %arrayidx, align 4
   %add = add nsw i32 %x.addr.1, %tmp1
Index: test/ScopInfo/phi_scalar_simple_2.ll
===================================================================
--- test/ScopInfo/phi_scalar_simple_2.ll
+++ test/ScopInfo/phi_scalar_simple_2.ll
@@ -28,9 +28,6 @@
 ; CHECK:     MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:         [N, c] -> { Stmt_for_cond[i0] -> MemRef_A[i0] };
 ; CHECK-NOT: Access
-; CHECK:     MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
-; CHECK:         [N, c] -> { Stmt_for_cond[i0] -> MemRef_x_addr_0[] };
-; CHECK-NOT: Access
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc5 ], [ 0, %entry ]
   %x.addr.0 = phi i32 [ %x, %entry ], [ %x.addr.1, %for.inc5 ]
   %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
@@ -58,12 +55,6 @@
 ; CHECK:     ReadAccess := [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:         [N, c] -> { Stmt_for_cond1[i0, i1] -> MemRef_x_addr_1__phi[] };
 ; CHECK-NOT: Access
-; CHECK:     MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
-; CHECK:         [N, c] -> { Stmt_for_cond1[i0, i1] -> MemRef_x_addr_1[] };
-; CHECK-NOT: Access
-; CHECK:     MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
-; CHECK:         [N, c] -> { Stmt_for_cond1[i0, i1] -> MemRef_x_addr_1[] };
-; CHECK-NOT: Access
   %x.addr.1 = phi i32 [ %x.addr.0, %for.body ], [ %x.addr.2, %for.inc ]
   %j.0 = phi i32 [ 0, %for.body ], [ %inc, %for.inc ]
   %exitcond = icmp ne i32 %j.0, %N
@@ -84,12 +75,12 @@
 if.then:                                          ; preds = %for.body3
 ; CHECK-LABEL: Stmt_if_then
 ; CHECK-NOT: Access
-; CHECK:     ReadAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK:         [N, c] -> { Stmt_if_then[i0, i1] -> MemRef_x_addr_1[] };
-; CHECK-NOT: Access
 ; CHECK:     ReadAccess := [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:         [N, c] -> { Stmt_if_then[i0, i1] -> MemRef_A[i0] };
 ; CHECK-NOT: Access
+; CHECK:     ReadAccess := [Reduction Type: NONE] [Scalar: 1]
+; CHECK:         [N, c] -> { Stmt_if_then[i0, i1] -> MemRef_x_addr_1[] };
+; CHECK-NOT: Access
 ; CHECK:     MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:         [N, c] -> { Stmt_if_then[i0, i1] -> MemRef_x_addr_2__phi[] };
 ; CHECK-NOT: Access
Index: test/ScopInfo/pointer-used-as-base-pointer-and-scalar-read.ll
===================================================================
--- test/ScopInfo/pointer-used-as-base-pointer-and-scalar-read.ll
+++ test/ScopInfo/pointer-used-as-base-pointer-and-scalar-read.ll
@@ -6,16 +6,12 @@
 
 ; CHECK: Arrays {
 ; CHECK:     float MemRef_A[*]; // Element size 4
-; CHECK:     float* MemRef_A; // Element size 8
 ; CHECK:     float* MemRef_x__phi; // Element size 8
-; CHECK:     float* MemRef_B; // Element size 8
 ; CHECK:     float* MemRef_C[*]; // Element size 8
 ; CHECK: }
 ; CHECK: Arrays (Bounds as pw_affs) {
 ; CHECK:     float MemRef_A[*]; // Element size 4
-; CHECK:     float* MemRef_A; // Element size 8
 ; CHECK:     float* MemRef_x__phi; // Element size 8
-; CHECK:     float* MemRef_B; // Element size 8
 ; CHECK:     float* MemRef_C[*]; // Element size 8
 ; CHECK: }
 ; CHECK: Alias Groups (0):
@@ -28,8 +24,6 @@
 ; CHECK:             [p] -> { Stmt_then[i0] -> [i0, 1] };
 ; CHECK:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:             [p] -> { Stmt_then[i0] -> MemRef_A[0] };
-; CHECK:         ReadAccess :=       [Reduction Type: NONE] [Scalar: 1]
-; CHECK:             [p] -> { Stmt_then[i0] -> MemRef_A[] };
 ; CHECK:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:             [p] -> { Stmt_then[i0] -> MemRef_x__phi[] };
 ; CHECK:   Stmt_else
@@ -39,8 +33,6 @@
 ; CHECK:             [p] -> { Stmt_else[i0] -> [i0, 0] : p >= 33 or p <= 31 };
 ; CHECK:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:             [p] -> { Stmt_else[i0] -> MemRef_A[0] };
-; CHECK:         ReadAccess :=       [Reduction Type: NONE] [Scalar: 1]
-; CHECK:             [p] -> { Stmt_else[i0] -> MemRef_B[] };
 ; CHECK:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:             [p] -> { Stmt_else[i0] -> MemRef_x__phi[] };
 ; CHECK:   Stmt_bb8
Index: test/ScopInfo/read-only-scalar-used-in-phi.ll
===================================================================
--- test/ScopInfo/read-only-scalar-used-in-phi.ll
+++ test/ScopInfo/read-only-scalar-used-in-phi.ll
@@ -18,6 +18,8 @@
 ; CHECK:           { Stmt_next[] -> MemRef_sum[] };
 ; CHECK:       MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
 ; CHECK:           { Stmt_next[] -> MemRef_phisum__phi[] };
+; CHECK:       MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
+; CHECK:           { Stmt_next[] -> MemRef_phisummerge[] };
 ; CHECK: Stmt_bb1
 ; CHECK:       Domain :=
 ; CHECK:           { Stmt_bb1[i0] : i0 <= 100 and i0 >= 0 };
@@ -29,6 +31,8 @@
 ; CHECK:           { Stmt_bb1[i0] -> MemRef_phisum__phi[] };
 ; CHECK:       ReadAccess :=       [Reduction Type: NONE] [Scalar: 0]
 ; CHECK:           { Stmt_bb1[i0] -> MemRef_A[i0] };
+; CHECK:       MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
+; CHECK:           { Stmt_bb1[i0] -> MemRef_phisummerge[] };
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
Index: test/ScopInfo/same-base-address-scalar-and-array.ll
===================================================================
--- test/ScopInfo/same-base-address-scalar-and-array.ll
+++ test/ScopInfo/same-base-address-scalar-and-array.ll
@@ -4,7 +4,6 @@
 ; as it is used as a memory base pointer (%0) but also as a scalar (%out.addr.0.lcssa).
 ;
 ; CHECK:         Arrays {
-; CHECK-NEXT:        float* MemRef_out; // Element size 8
 ; CHECK-NEXT:        float* MemRef_out_addr_0_lcssa; // Element size 8
 ; CHECK-NEXT:        float MemRef_out[*]; // Element size 4
 ; CHECK-NEXT:    }
Index: test/ScopInfo/scalar.ll
===================================================================
--- test/ScopInfo/scalar.ll
+++ test/ScopInfo/scalar.ll
@@ -53,7 +53,7 @@
 ; CHECK:           [N] -> { Stmt_S2[i0] : i0 >= 0 and i0 <= -1 + N };
 ; CHECK:       Schedule :=
 ; CHECK:           [N] -> { Stmt_S2[i0] -> [i0, 1] };
-; CHECK:       ReadAccess :=
-; CHECK:           [N] -> { Stmt_S2[i0] -> MemRef_val[] };
 ; CHECK:       MustWriteAccess :=
 ; CHECK:           [N] -> { Stmt_S2[i0] -> MemRef_a[i0] };
+; CHECK:       ReadAccess :=
+; CHECK:           [N] -> { Stmt_S2[i0] -> MemRef_val[] };
Index: test/ScopInfo/tempscop-printing.ll
===================================================================
--- test/ScopInfo/tempscop-printing.ll
+++ test/ScopInfo/tempscop-printing.ll
@@ -77,10 +77,10 @@
   %indvar.j = phi i64 [ 0, %entry.next ], [ %indvar.j.next, %for.j ]
   %scevgep = getelementptr i64, i64* %A, i64 %indvar.j
   store i64 %init, i64* %scevgep
-; CHECK:          ReadAccess :=       [Reduction Type: NONE] [Scalar: 1]
-; CHECK-NEXT:         [N] -> { Stmt_for_j[i0, i1] -> MemRef_init[] };
 ; CHECK:          MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
 ; CHECK-NEXT:         [N] -> { Stmt_for_j[i0, i1] -> MemRef_A[i1] };
+; CHECK:          ReadAccess :=       [Reduction Type: NONE] [Scalar: 1]
+; CHECK-NEXT:         [N] -> { Stmt_for_j[i0, i1] -> MemRef_init[] };
   %indvar.j.next = add nsw i64 %indvar.j, 1
   %exitcond.j = icmp eq i64 %indvar.j.next, %N
   br i1 %exitcond.j, label %for.i.end, label %for.j