diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -221,6 +221,7 @@
   SizeOffsetType visitInstruction(Instruction &I);
 
 private:
+  SizeOffsetType computeImpl(Value *V);
   bool CheckedZextOrTrunc(APInt &I);
 };
 
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -573,10 +573,34 @@
 }
 
 SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
+  unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());
+
+  // Stripping pointer casts can strip address space casts, which can change
+  // the index type size. The invariant is that we use the type of the value
+  // passed to computeImpl to determine the index type size. If we stripped
+  // address space casts, we "repair" the APInts on the way up so that, for
+  // the caller, they match the index type of the original argument V.
+  V = V->stripPointerCasts();
+
+  // IntTyBits and Zero are set here so they match the type of the value that
+  // is passed on to computeImpl below.
   IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
   Zero = APInt::getZero(IntTyBits);
-  V = V->stripPointerCasts();
 
+  if (InitialIntTyBits == IntTyBits)
+    return computeImpl(V);
+
+  // We stripped an address space cast that changed the index type size.
+  // Readjust the result to match the argument's index type size.
+  SizeOffsetType SOT = computeImpl(V);
+  if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
+    return unknown();
+  if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
+    return unknown();
+  return SOT;
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
   if (Instruction *I = dyn_cast<Instruction>(V)) {
     // If we have already seen this instruction, bail out. Cycles can happen in
     // unreachable code after constant propagation.
diff --git a/llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll b/llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll
--- a/llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll
+++ b/llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -instcombine -S < %s | FileCheck %s
 
-target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128-p7:32:32"
 target triple = "x86_64-apple-macosx10.14.0"
 
 ; Function Attrs: nounwind ssp uwtable
@@ -152,6 +152,19 @@
 ; CHECK-NEXT: br i1 false, label %if.else, label %if.then
 ; CHECK: call void @fortified_chk(i8* %obj, i64 [[SZ]])
 
+@p7 = internal addrspace(7) global i8 0
+
+; Gracefully handle the AS cast when the address spaces have different pointer widths.
+define i64 @as_cast(i1 %c) {
+entry:
+  %p0 = tail call i8* @malloc(i64 64)
+  %gep = getelementptr i8, i8 addrspace(7)* @p7, i32 1
+  %as = addrspacecast i8 addrspace(7)* %gep to i8*
+  %select = select i1 %c, i8* %p0, i8* %as
+  %calc_size = tail call i64 @llvm.objectsize.i64.p0i8(i8* %select, i1 false, i1 true, i1 true)
+  ret i64 %calc_size
+}
+
 declare void @bury(i32) local_unnamed_addr #2
 
 ; Function Attrs: nounwind allocsize(0)
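Reviewer note: the two-argument ::CheckedZextOrTrunc that the new compute() calls is presumably a file-local helper in MemoryBuiltins.cpp, distinct from the single-argument member declared in the header. A minimal sketch of its assumed semantics, written against the public llvm::APInt API only; the in-tree body may differ:

  #include "llvm/ADT/APInt.h"

  // Zero-extend or truncate I to NewWidth in place. Truncation is only done
  // when it is lossless, i.e. when every set bit of I fits into the narrower
  // width; otherwise report failure so the caller can bail out to unknown().
  static bool CheckedZextOrTrunc(llvm::APInt &I, unsigned NewWidth) {
    if (I.getBitWidth() > NewWidth && I.getActiveBits() > NewWidth)
      return false; // Value does not fit in NewWidth; signal the caller.
    I = I.zextOrTrunc(NewWidth); // Lossless resize (no-op at equal widths).
    return true;
  }

Failing instead of silently truncating is what lets compute() fall back to unknown() when a size or offset computed behind an addrspacecast cannot be represented in the index type of the original pointer.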