Index: lib/Transforms/Scalar/SROA.cpp
===================================================================
--- lib/Transforms/Scalar/SROA.cpp
+++ lib/Transforms/Scalar/SROA.cpp
@@ -3627,9 +3627,12 @@
             PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());
 
         StoreInst *PStore = IRB.CreateAlignedStore(
-            PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr,
-                                  APInt(DL.getPointerSizeInBits(), PartOffset),
-                                  PartPtrTy, StoreBasePtr->getName() + "."),
+            PLoad,
+            getAdjustedPtr(
+                IRB, DL, StoreBasePtr,
+                APInt(DL.getPointerSizeInBits(SI->getPointerAddressSpace()),
+                      PartOffset),
+                PartPtrTy, StoreBasePtr->getName() + "."),
             getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
         PStore->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);
         DEBUG(dbgs() << "      +" << PartOffset << ":" << *PStore << "\n");
@@ -3698,7 +3701,8 @@
     int Idx = 0, Size = Offsets.Splits.size();
     for (;;) {
       auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
-      auto *PartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
+      auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
+      auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
 
       // Either lookup a split load or create one.
       LoadInst *PLoad;
@@ -3707,9 +3711,11 @@
       } else {
         IRB.SetInsertPoint(LI);
         PLoad = IRB.CreateAlignedLoad(
-            getAdjustedPtr(IRB, DL, LoadBasePtr,
-                           APInt(DL.getPointerSizeInBits(), PartOffset),
-                           PartPtrTy, LoadBasePtr->getName() + "."),
+            getAdjustedPtr(
+                IRB, DL, LoadBasePtr,
+                APInt(DL.getPointerSizeInBits(LI->getPointerAddressSpace()),
+                      PartOffset),
+                LoadPartPtrTy, LoadBasePtr->getName() + "."),
             getAdjustedAlignment(LI, PartOffset, DL),
             /*IsVolatile*/ false, LI->getName());
       }
@@ -3717,9 +3723,12 @@
       // And store this partition.
      IRB.SetInsertPoint(SI);
       StoreInst *PStore = IRB.CreateAlignedStore(
-          PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr,
-                                APInt(DL.getPointerSizeInBits(), PartOffset),
-                                PartPtrTy, StoreBasePtr->getName() + "."),
+          PLoad,
+          getAdjustedPtr(
+              IRB, DL, StoreBasePtr,
+              APInt(DL.getPointerSizeInBits(SI->getPointerAddressSpace()),
+                    PartOffset),
+              StorePartPtrTy, StoreBasePtr->getName() + "."),
           getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
 
       // Now build a new slice for the alloca.
Index: test/Transforms/SROA/address-spaces.ll
===================================================================
--- test/Transforms/SROA/address-spaces.ll
+++ test/Transforms/SROA/address-spaces.ll
@@ -83,3 +83,31 @@
   store i32 addrspace(3)* @l, i32 addrspace(3)** %3, align 8
   ret void
 }
+
+; Test load from and store to non-zero address space.
+define void @test_load_store_diff_addr_space([2 x float] addrspace(1)* %complex1, [2 x float] addrspace(1)* %complex2) {
+; CHECK-LABEL: @test_load_store_diff_addr_space
+; CHECK-NOT: alloca
+; CHECK: load i32, i32 addrspace(1)*
+; CHECK: load i32, i32 addrspace(1)*
+; CHECK: store i32 %{{.*}}, i32 addrspace(1)*
+; CHECK: store i32 %{{.*}}, i32 addrspace(1)*
+  %a = alloca i64
+  %a.cast = bitcast i64* %a to [2 x float]*
+  %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
+  %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
+  %complex1.gep = getelementptr [2 x float], [2 x float] addrspace(1)* %complex1, i32 0, i32 0
+  %p1 = bitcast float addrspace(1)* %complex1.gep to i64 addrspace(1)*
+  %v1 = load i64, i64 addrspace(1)* %p1
+  store i64 %v1, i64* %a
+  %f1 = load float, float* %a.gep1
+  %f2 = load float, float* %a.gep2
+  %sum = fadd float %f1, %f2
+  store float %sum, float* %a.gep1
+  store float %sum, float* %a.gep2
+  %v2 = load i64, i64* %a
+  %complex2.gep = getelementptr [2 x float], [2 x float] addrspace(1)* %complex2, i32 0, i32 0
+  %p2 = bitcast float addrspace(1)* %complex2.gep to i64 addrspace(1)*
+  store i64 %v2, i64 addrspace(1)* %p2
+  ret void
+}