Index: lib/Transforms/Scalar/GVN.cpp =================================================================== --- lib/Transforms/Scalar/GVN.cpp +++ lib/Transforms/Scalar/GVN.cpp @@ -858,11 +858,12 @@ const DataLayout &DL = LI->getModule()->getDataLayout(); + Instruction *DepInst = DepInfo.getInst(); if (DepInfo.isClobber()) { // If the dependence is to a store that writes to a superset of the bits // read by the load, we can extract the bits we need for the load from the // stored value. - if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) { + if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) { // Can't forward from non-atomic to atomic without violating memory model. if (Address && LI->isAtomic() <= DepSI->isAtomic()) { int Offset = @@ -878,7 +879,7 @@ // load i32* P // load i8* (P+1) // if we have this, replace the later with an extraction from the former. - if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) { + if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) { // If this is a clobber and L is the first instruction in its block, then // we have the first instruction in the entry block. // Can't forward from non-atomic to atomic without violating memory model. @@ -895,7 +896,7 @@ // If the clobbering value is a memset/memcpy/memmove, see if we can // forward a value on from it. - if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) { + if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) { if (Address && !LI->isAtomic()) { int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address, DepMI, DL); @@ -909,8 +910,7 @@ LLVM_DEBUG( // fast print dep, using operator<< on instruction is too slow. 
dbgs() << "GVN: load "; LI->printAsOperand(dbgs()); - Instruction *I = DepInfo.getInst(); - dbgs() << " is clobbered by " << *I << '\n';); + dbgs() << " is clobbered by " << *DepInst << '\n';); if (ORE->allowExtraAnalysis(DEBUG_TYPE)) reportMayClobberedLoad(LI, DepInfo, DT, ORE); @@ -918,8 +918,6 @@ } assert(DepInfo.isDef() && "follows from above"); - Instruction *DepInst = DepInfo.getInst(); - // Loading the allocation -> undef. if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) || // Loading immediately after lifetime begin -> undef. @@ -938,8 +936,7 @@ // Reject loads and stores that are to the same address but are of // different types if we have to. If the stored value is larger or equal to // the loaded value, we can reuse it. - if (S->getValueOperand()->getType() != LI->getType() && - !canCoerceMustAliasedValueToLoad(S->getValueOperand(), + if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(), DL)) return false; @@ -955,8 +952,7 @@ // If the types mismatch and we can't handle it, reject reuse of the load. // If the stored value is larger or equal to the loaded value, we can reuse // it. - if (LD->getType() != LI->getType() && - !canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL) + if (!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL)) return false; // Can't forward from non-atomic to atomic without violating memory model. Index: lib/Transforms/Utils/VNCoercion.cpp =================================================================== --- lib/Transforms/Utils/VNCoercion.cpp +++ lib/Transforms/Utils/VNCoercion.cpp @@ -14,13 +14,17 @@ /// Return true if coerceAvailableValueToLoadType will succeed. bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, const DataLayout &DL) { + Type *StoredTy = StoredVal->getType(); + if (StoredTy == LoadTy) + return true; + // If the loaded or stored value is an first class array or struct, don't try // to transform them. We need to be able to bitcast to integer. 
if (LoadTy->isStructTy() || LoadTy->isArrayTy() || - StoredVal->getType()->isStructTy() || StoredVal->getType()->isArrayTy()) + StoredTy->isStructTy() || StoredTy->isArrayTy()) return false; - uint64_t StoreSize = DL.getTypeSizeInBits(StoredVal->getType()); + uint64_t StoreSize = DL.getTypeSizeInBits(StoredTy); // The store size must be byte-aligned to support future type casts. if (llvm::alignTo(StoreSize, 8) != StoreSize) @@ -306,7 +310,7 @@ return -1; GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL)); - if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) + if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) return -1; // See if the access is within the bounds of the transfer.