diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -352,9 +352,9 @@
                                                const DataLayout &DL) {
   do {
     Type *SrcTy = C->getType();
-    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
-    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
-    if (SrcSize < DestSize)
+    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
+    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
+    if (!TypeSize::isKnownGE(SrcSize, DestSize))
       return nullptr;
 
     // Catch the obvious splat cases (since all-zeros can coerce non-integral
diff --git a/llvm/test/Transforms/InstCombine/vscale_load.ll b/llvm/test/Transforms/InstCombine/vscale_load.ll
new file
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/vscale_load.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S -verify | FileCheck %s
+
+define <2 x i8> @constprop_load_bitcast() {
+; CHECK-LABEL: @constprop_load_bitcast(
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
+;
+  %stack_var = alloca <vscale x 2 x i8>, align 16
+  store <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8>* %stack_var, align 16
+  %cast_to_fixed = bitcast <vscale x 2 x i8>* %stack_var to <2 x i8>*
+  %a = load <2 x i8>, <2 x i8>* %cast_to_fixed, align 16
+  ret <2 x i8> %a
+}
+
+; vscale-sized vec not guaranteed to fill destination.
+define <8 x i8> @constprop_load_bitcast_neg() {
+; CHECK-LABEL: @constprop_load_bitcast_neg(
+; CHECK-NEXT:    [[STACK_VAR:%.*]] = alloca <vscale x 2 x i8>, align 16
+; CHECK-NEXT:    store <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8>* [[STACK_VAR]], align 16
+; CHECK-NEXT:    [[CAST_TO_FIXED:%.*]] = bitcast <vscale x 2 x i8>* [[STACK_VAR]] to <8 x i8>*
+; CHECK-NEXT:    [[A:%.*]] = load <8 x i8>, <8 x i8>* [[CAST_TO_FIXED]], align 16
+; CHECK-NEXT:    ret <8 x i8> [[A]]
+;
+  %stack_var = alloca <vscale x 2 x i8>, align 16
+  store <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8>* %stack_var, align 16
+  %cast_to_fixed = bitcast <vscale x 2 x i8>* %stack_var to <8 x i8>*
+  %a = load <8 x i8>, <8 x i8>* %cast_to_fixed, align 16
+  ret <8 x i8> %a
+}
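
Note on the new size check (illustrative only, not part of the patch): DL.getTypeSizeInBits() returns a TypeSize, and for a scalable vector that value is only a minimum that gets multiplied by the runtime vscale. TypeSize::isKnownGE() therefore asks whether the source is known to be at least as large as the destination for every possible vscale, which is exactly the property the two tests above exercise. The sketch below assumes the TypeSize::getFixed()/getScalable() helpers from llvm/Support/TypeSize.h and mirrors the positive and negative test cases.

  #include "llvm/Support/TypeSize.h"
  #include <cassert>
  using namespace llvm;

  int main() {
    TypeSize Scalable16 = TypeSize::getScalable(16); // <vscale x 2 x i8>: at least 16 bits
    TypeSize Fixed16 = TypeSize::getFixed(16);       // <2 x i8>
    TypeSize Fixed64 = TypeSize::getFixed(64);       // <8 x i8>

    // vscale >= 1, so the scalable source always covers 16 fixed bits; the
    // positive test still folds the load to zeroinitializer.
    assert(TypeSize::isKnownGE(Scalable16, Fixed16));

    // It is not known to cover 64 fixed bits (vscale could be 1), so the fold
    // now returns nullptr, matching the negative test.
    assert(!TypeSize::isKnownGE(Scalable16, Fixed64));
    return 0;
  }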