diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -37,23 +37,27 @@
   if (StoreSize < DL.getTypeSizeInBits(LoadTy).getFixedSize())
     return false;
 
+  bool StoredNI = DL.isNonIntegralPointerType(StoredTy->getScalarType());
+  bool LoadNI = DL.isNonIntegralPointerType(LoadTy->getScalarType());
   // Don't coerce non-integral pointers to integers or vice versa.
-  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()) !=
-      DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
+  if (StoredNI != LoadNI) {
     // As a special case, allow coercion of memset used to initialize
     // an array w/null. Despite non-integral pointers not generally having a
     // specific bit pattern, we do assume null is zero.
     if (auto *CI = dyn_cast<Constant>(StoredVal))
       return CI->isNullValue();
     return false;
+  } else if (StoredNI && LoadNI &&
+             StoredTy->getPointerAddressSpace() !=
+                 LoadTy->getPointerAddressSpace()) {
+    return false;
   }
 
   // The implementation below uses inttoptr for vectors of unequal size; we
   // can't allow this for non integral pointers. We could teach it to extract
   // exact subvectors if desired.
-  if (DL.isNonIntegralPointerType(StoredTy->getScalarType()) &&
-      StoreSize != DL.getTypeSizeInBits(LoadTy).getFixedSize())
+  if (StoredNI && StoreSize != DL.getTypeSizeInBits(LoadTy).getFixedSize())
     return false;
 
   return true;
 }
diff --git a/llvm/test/Transforms/GVN/non-integral-pointers.ll b/llvm/test/Transforms/GVN/non-integral-pointers.ll
--- a/llvm/test/Transforms/GVN/non-integral-pointers.ll
+++ b/llvm/test/Transforms/GVN/non-integral-pointers.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -gvn -S < %s | FileCheck %s
 
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4:5"
 target triple = "x86_64-unknown-linux-gnu"
 
 define void @f0(i1 %alwaysFalse, i64 %val, i64* %loc) {
@@ -430,3 +430,20 @@
 
 declare void @use.v2(<2 x i64 addrspace(4)*>)
 declare void @use.v4(<4 x i64 addrspace(4)*>)
+define i8 addrspace(5)* @multini(i1 %alwaysFalse, i8 addrspace(4)* %val, i8 addrspace(4)** %loc) {
+; CHECK-LABEL: @multini(
+; CHECK-NOT: inttoptr
+; CHECK-NOT: ptrtoint
+; CHECK-NOT: addrspacecast
+entry:
+  store i8 addrspace(4)* %val, i8 addrspace(4)** %loc
+  br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken
+
+neverTaken:
+  %loc.bc = bitcast i8 addrspace(4)** %loc to i8 addrspace(5)**
+  %differentas = load i8 addrspace(5)*, i8 addrspace(5)** %loc.bc
+  ret i8 addrspace(5)* %differentas
+
+alwaysTaken:
+  ret i8 addrspace(5)* null
+}