diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -167,11 +167,6 @@
                                            Value *WritePtr,
                                            uint64_t WriteSizeInBits,
                                            const DataLayout &DL) {
-  // If the loaded or stored value is a first class array or struct, don't try
-  // to transform them. We need to be able to bitcast to integer.
-  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
-    return -1;
-
   int64_t StoreOffset = 0, LoadOffset = 0;
   Value *StoreBase =
       GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL);
@@ -221,21 +216,9 @@
 int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                    StoreInst *DepSI, const DataLayout &DL) {
   auto *StoredVal = DepSI->getValueOperand();
-
-  // Cannot handle reading from store of first-class aggregate yet.
-  if (StoredVal->getType()->isStructTy() ||
-      StoredVal->getType()->isArrayTy())
+  if (!canCoerceMustAliasedValueToLoad(StoredVal, LoadTy, DL))
     return -1;
 
-  // Don't coerce non-integral pointers to integers or vice versa.
-  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()) !=
-      DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
-    // Allow casts of zero values to null as a special case
-    auto *CI = dyn_cast<Constant>(StoredVal);
-    if (!CI || !CI->isNullValue())
-      return -1;
-  }
-
   Value *StorePtr = DepSI->getPointerOperand();
   uint64_t StoreSize =
       DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
@@ -248,23 +231,19 @@
 /// the other load can feed into the second load.
 int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
                                   LoadInst *DepLI, const DataLayout &DL) {
-  // Cannot handle reading from store of first-class aggregate yet.
-  if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
-    return -1;
-
-  // Don't coerce non-integral pointers to integers or vice versa.
-  if (DL.isNonIntegralPointerType(DepLI->getType()->getScalarType()) !=
-      DL.isNonIntegralPointerType(LoadTy->getScalarType()))
-    return -1;
-
   Value *DepPtr = DepLI->getPointerOperand();
-  uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType());
-  int R = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
-  if (R != -1)
-    return R;
+  if (canCoerceMustAliasedValueToLoad(DepLI->getType(), LoadTy, DL)) {
+    uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType());
+    int R = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
+    if (R != -1)
+      return R;
+  }
 
-  // If we have a load/load clobber an DepLI can be widened to cover this load,
+  // If we have a load/load clobber and DepLI can be widened to cover this load,
   // then we should widen it!
+  if (!canCoerceMustAliasedValueToLoad(LoadTy, DepLI->getType(), DL))
+    return -1;
+
   int64_t LoadOffs = 0;
   const Value *LoadBase =
       GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
@@ -294,13 +273,16 @@
   // If this is memset, we just need to see if the offset is valid in the size
   // of the memset..
   if (MI->getIntrinsicID() == Intrinsic::memset) {
-    if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
-      auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MI)->getValue());
-      if (!CI || !CI->isZero())
-        return -1;
-    }
-    return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
-                                          MemSizeInBits, DL);
+    Value *StoredVal = cast<MemSetInst>(MI)->getValue();
+    if (auto *CI = dyn_cast<Constant>(StoredVal))
+      if (CI->isNullValue())
+        return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
+                                              MemSizeInBits, DL);
+    Type *StoreTy = IntegerType::get(LoadTy->getContext(), MemSizeInBits);
+    if (canCoerceMustAliasedValueToLoad(StoreTy, LoadTy, DL))
+      return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
+                                            MemSizeInBits, DL);
+    return -1;
   }
 
   // If we have a memcpy/memmove, the only case we can handle is if this is a
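
Note: the hand-rolled checks deleted above all funnel into canCoerceMustAliasedValueToLoad, which the patch now also calls with a Type as its first argument (the load-widening and memset cases). The helper itself is defined earlier in VNCoercion.cpp and is not part of this diff; what follows is only a simplified sketch of the legality logic it is expected to carry, reconstructed from the checks being deleted here, not the exact in-tree body.

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;

    // Sketch of the Value-based overload. The Type-based overload used by
    // the load-widening and memset paths above is the same minus the
    // null-constant escape hatch.
    static bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
                                                const DataLayout &DL) {
      Type *StoredTy = StoredVal->getType();

      // First-class aggregates cannot be bitcast to integer, so give up.
      if (StoredTy->isStructTy() || StoredTy->isArrayTy() ||
          LoadTy->isStructTy() || LoadTy->isArrayTy())
        return false;

      // The store must be at least as wide as the load.
      if (DL.getTypeSizeInBits(StoredTy) < DL.getTypeSizeInBits(LoadTy))
        return false;

      // Don't coerce non-integral pointers to integers or vice versa; their
      // bit representation is unspecified. The one exception is a stored
      // null constant, since null is assumed to be the zero bit pattern.
      if (DL.isNonIntegralPointerType(StoredTy->getScalarType()) !=
          DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
        auto *C = dyn_cast<Constant>(StoredVal);
        return C && C->isNullValue();
      }

      return true;
    }

Centralizing the legality test keeps the store, load, and memset paths from drifting out of sync, and it is what lets the memset case handle nonzero fill values: it simply asks whether an integer of the memset's width (StoreTy) could be coerced to LoadTy.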