Index: lib/Analysis/Loads.cpp
===================================================================
--- lib/Analysis/Loads.cpp
+++ lib/Analysis/Loads.cpp
@@ -210,48 +210,9 @@
   if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
     return true;
 
-  int64_t ByteOffset = 0;
-  Value *Base = V;
-  Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);
-
-  if (ByteOffset < 0) // out of bounds
-    return false;
-
-  Type *BaseType = nullptr;
-  unsigned BaseAlign = 0;
-  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
-    // An alloca is safe to load from as load as it is suitably aligned.
-    BaseType = AI->getAllocatedType();
-    BaseAlign = AI->getAlignment();
-  } else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
-    // Global variables are not necessarily safe to load from if they are
-    // interposed arbitrarily. Their size may change or they may be weak and
-    // require a test to determine if they were in fact provided.
-    if (!GV->isInterposable()) {
-      BaseType = GV->getType()->getElementType();
-      BaseAlign = GV->getAlignment();
-    }
-  }
-
   PointerType *AddrTy = cast<PointerType>(V->getType());
   uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType());
 
-  // If we found a base allocated type from either an alloca or global variable,
-  // try to see if we are definitively within the allocated region. We need to
-  // know the size of the base type and the loaded type to do anything in this
-  // case.
-  if (BaseType && BaseType->isSized()) {
-    if (BaseAlign == 0)
-      BaseAlign = DL.getPrefTypeAlignment(BaseType);
-
-    if (Align <= BaseAlign) {
-      // Check if the load is within the bounds of the underlying object.
-      if (ByteOffset + LoadSize <= DL.getTypeAllocSize(BaseType) &&
-          ((ByteOffset % Align) == 0))
-        return true;
-    }
-  }
-
   if (!ScanFrom)
     return false;
 
Index: lib/Transforms/Scalar/SROA.cpp
===================================================================
--- lib/Transforms/Scalar/SROA.cpp
+++ lib/Transforms/Scalar/SROA.cpp
@@ -1218,7 +1218,7 @@
       if (BBI->mayWriteToMemory())
        return false;
 
-    uint64_t Size = DL.getTypeStoreSizeInBits(LI->getType());
+    uint64_t Size = DL.getTypeStoreSize(LI->getType());
     MaxAlign = std::max(MaxAlign, LI->getAlignment());
     MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
     HaveLoad = true;
Index: test/Transforms/SROA/addrspacecast.ll
===================================================================
--- test/Transforms/SROA/addrspacecast.ll
+++ test/Transforms/SROA/addrspacecast.ll
@@ -286,8 +286,10 @@
 
 define void @select_addrspacecast_gv(i1 %a, i1 %b) {
 ; CHECK-LABEL: @select_addrspacecast_gv(
-; CHECK-NEXT:    [[COND_SROA_SPECULATE_LOAD_FALSE:%.*]] = load i64, i64 addrspace(1)* @gv, align 8
-; CHECK-NEXT:    [[COND_SROA_SPECULATED:%.*]] = select i1 undef, i64 undef, i64 [[COND_SROA_SPECULATE_LOAD_FALSE]]
+; CHECK-NEXT:    [[C:%.*]] = alloca i64, align 8
+; CHECK-NEXT:    [[C_0_ASC_SROA_CAST:%.*]] = addrspacecast i64* [[C]] to i64 addrspace(1)*
+; CHECK-NEXT:    [[COND_IN:%.*]] = select i1 undef, i64 addrspace(1)* [[C_0_ASC_SROA_CAST]], i64 addrspace(1)* @gv
+; CHECK-NEXT:    [[COND:%.*]] = load i64, i64 addrspace(1)* [[COND_IN]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %c = alloca i64, align 8
@@ -299,10 +301,11 @@
   ret void
 }
 
-; CHECK-LABEL: @select_addrspacecast_i8(
-; CHECK: [[SEL:%.*]] = select i1 undef, i8 undef, i8 undef
-; CHECK-NEXT: ret i8 [[SEL]]
 define i8 @select_addrspacecast_i8() {
+; CHECK-LABEL: @select_addrspacecast_i8(
+; CHECK-NEXT:    [[RET_SROA_SPECULATED:%.*]] = select i1 undef, i8 undef, i8 undef
+; CHECK-NEXT:    ret i8 [[RET_SROA_SPECULATED]]
+;
   %a = alloca i8
   %b = alloca i8