diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -352,9 +352,9 @@
                                              const DataLayout &DL) {
   do {
     Type *SrcTy = C->getType();
-    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
-    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
-    if (SrcSize < DestSize)
+    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
+    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
+    if (!TypeSize::isKnownGE(SrcSize, DestSize))
       return nullptr;
 
     // Catch the obvious splat cases (since all-zeros can coerce non-integral
diff --git a/llvm/test/Transforms/InstCombine/vscale_load.ll b/llvm/test/Transforms/InstCombine/vscale_load.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/vscale_load.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S -verify | FileCheck %s
+
+define <2 x i8> @constprop_load_bitcast(<vscale x 2 x i8>* %ptr) {
+; CHECK-LABEL: @constprop_load_bitcast(
+; CHECK-NEXT:    store <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8>* [[PTR:%.*]], align 16
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
+;
+  store <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8>* %ptr, align 16
+  %cast_to_fixed = bitcast <vscale x 2 x i8>* %ptr to <2 x i8>*
+  %a = load <2 x i8>, <2 x i8>* %cast_to_fixed, align 16
+  ret <2 x i8> %a
+}
+
+; vscale-sized vec not guaranteed to fill destination.
+define <8 x i8> @constprop_load_bitcast_neg(<vscale x 2 x i8>* %ptr) {
+; CHECK-LABEL: @constprop_load_bitcast_neg(
+; CHECK-NEXT:    store <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8>* [[PTR:%.*]], align 16
+; CHECK-NEXT:    [[CAST_TO_FIXED:%.*]] = bitcast <vscale x 2 x i8>* [[PTR]] to <8 x i8>*
+; CHECK-NEXT:    [[A:%.*]] = load <8 x i8>, <8 x i8>* [[CAST_TO_FIXED]], align 16
+; CHECK-NEXT:    ret <8 x i8> [[A]]
+;
+  store <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8>* %ptr, align 16
+  %cast_to_fixed = bitcast <vscale x 2 x i8>* %ptr to <8 x i8>*
+  %a = load <8 x i8>, <8 x i8>* %cast_to_fixed, align 16
+  ret <8 x i8> %a
+}
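
Note on the functional change above: for scalable vectors, DL.getTypeSizeInBits returns a TypeSize whose actual value is a multiple of the unknown runtime vscale, so the old uint64_t comparison only sees the minimum size and cannot decide whether the source really covers the destination. TypeSize::isKnownGE(SrcSize, DestSize) answers true only when the relation holds for every possible vscale >= 1, which is what the two tests exercise. The following standalone sketch is a hypothetical mock of that comparison rule (not the real llvm::TypeSize API), assuming a <vscale x 2 x i8> source as in the tests:

```cpp
// Hypothetical stand-in for llvm::TypeSize, written only to illustrate the
// "known greater-or-equal" rule the patch relies on; not the real LLVM API.
#include <cstdint>
#include <iostream>

struct MockTypeSize {
  uint64_t MinBits; // size in bits when vscale == 1
  bool Scalable;    // true => real size is MinBits * vscale
};

// "Known GE" must hold for every possible vscale >= 1.
// - Src fixed, Dest scalable: Dest can grow without bound, never known.
// - Otherwise: the worst case for Src is vscale == 1 (and if both are
//   scalable, vscale cancels out), so comparing minimum sizes decides it.
static bool isKnownGE(MockTypeSize Src, MockTypeSize Dest) {
  if (!Src.Scalable && Dest.Scalable)
    return false;
  return Src.MinBits >= Dest.MinBits;
}

int main() {
  MockTypeSize ScalableV2i8 = {16, true}; // <vscale x 2 x i8>
  MockTypeSize FixedV2i8 = {16, false};   // <2 x i8>
  MockTypeSize FixedV8i8 = {64, false};   // <8 x i8>

  // Folds: 16 * vscale >= 16 for any vscale >= 1.
  std::cout << isKnownGE(ScalableV2i8, FixedV2i8) << '\n'; // 1
  // Does not fold: with vscale == 1 the source is only 16 bits < 64.
  std::cout << isKnownGE(ScalableV2i8, FixedV8i8) << '\n'; // 0
}
```

With the stored type's minimum size at 16 bits, the 16-bit <2 x i8> load in the first test folds to zeroinitializer, while the 64-bit <8 x i8> load in the negative test is left untouched.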