diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -352,9 +352,9 @@
                                          const DataLayout &DL) {
   do {
     Type *SrcTy = C->getType();
-    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
-    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
-    if (SrcSize < DestSize)
+    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
+    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
+    if (TypeSize::isKnownLT(SrcSize, DestSize))
       return nullptr;
 
     // Catch the obvious splat cases (since all-zeros can coerce non-integral
diff --git a/llvm/test/Transforms/InstCombine/vscale_load_bitcast.ll b/llvm/test/Transforms/InstCombine/vscale_load_bitcast.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/vscale_load_bitcast.ll
@@ -0,0 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S -verify | FileCheck %s
+
+define <8 x i8> @constprop_load_bitcast() {
+; CHECK-LABEL: @constprop_load_bitcast(
+; CHECK-NEXT:    ret <8 x i8> zeroinitializer
+;
+  %stack_var = alloca <vscale x 2 x i32>, align 16
+  store <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32>* %stack_var, align 16
+  %cast_to_fixed = bitcast <vscale x 2 x i32>* %stack_var to <8 x i8>*
+  %a = load <8 x i8>, <8 x i8>* %cast_to_fixed, align 16
+  ret <8 x i8> %a
+}
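
For context, the point of switching from uint64_t to TypeSize::isKnownLT is that a plain integer
comparison only sees a scalable vector's minimum size, so it cannot distinguish "provably smaller
for every vscale" from "possibly large enough". The sketch below is a simplified, self-contained
stand-in for that "known less than" idea, not the real llvm::TypeSize API (which lives in
llvm/Support/TypeSize.h); the ToySize struct and its isKnownLT helper are invented here purely for
illustration. The test above uses a scalable source whose 64-bit minimum size covers the fixed
<8 x i8> destination; the exact scalable element type is incidental, any source with a large
enough minimum size exercises the same path.

// toy_size.cpp -- illustrative stand-in for llvm::TypeSize, not the real class.
#include <cassert>
#include <cstdint>

struct ToySize {
  uint64_t MinBits; // size in bits when vscale == 1
  bool Scalable;    // true if the real size is MinBits * vscale
};

// "Known less than": LHS < RHS must hold for every legal vscale (>= 1).
static bool isKnownLT(ToySize LHS, ToySize RHS) {
  if (LHS.Scalable && !RHS.Scalable)
    return false; // LHS grows with vscale, so it is never provably smaller.
  // Same scaling on both sides, or fixed LHS vs. scalable RHS: the minimum
  // sizes decide, since vscale == 1 is the worst case for the comparison.
  return LHS.MinBits < RHS.MinBits;
}

int main() {
  ToySize Fixed64{64, false}, Fixed128{128, false}, Scalable64{64, true};
  assert(isKnownLT(Fixed64, Fixed128));     // 64 < 128 for every vscale
  assert(!isKnownLT(Fixed64, Scalable64));  // at vscale == 1 both are 64 bits
  assert(!isKnownLT(Scalable64, Fixed64));  // 64 * vscale is never below 64
  assert(!isKnownLT(Scalable64, Fixed128)); // 64 * vscale may reach 128
  return 0;
}

With this semantics, the patched early-return in ConstantFoldLoadThroughBitcast only bails out when
the source is provably too small for the destination, which lets the scalable zero store in the new
test be forwarded to the fixed-width load.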