Index: llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -2526,7 +2526,7 @@
     }
   }
 
-  if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
+  if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
     // Beware: messing with this target-specific oddity may cause trouble.
     if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
       Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
@@ -2555,7 +2555,7 @@
     }
   }
 
-  if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
+  if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
     if (SrcVTy->getNumElements() == 1) {
       // If our destination is not a vector, then make this a straight
       // scalar-scalar cast.
Index: llvm/test/Transforms/InstCombine/AArch64/sve-bitcast.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstCombine/AArch64/sve-bitcast.ll
@@ -0,0 +1,13 @@
+; RUN: opt -instcombine -mtriple=aarch64-linux-gnu -mattr=+sve -S < %s | FileCheck %s
+
+; We shouldn't fold bitcast(insert <vscale x 1 x iX> .., iX %val, i32 0)
+; into bitcast(iX %val) for scalable vectors.
+define <vscale x 2 x i8> @bitcast_of_insert_i8_i16(i16 %val) #0 {
+; CHECK-LABEL: @bitcast_of_insert_i8_i16(
+; CHECK-NOT: bitcast i16 %val to <vscale x 2 x i8>
+; CHECK: bitcast <vscale x 1 x i16> %op2 to <vscale x 2 x i8>
+entry:
+  %op2 = insertelement <vscale x 1 x i16> undef, i16 %val, i32 0
+  %0 = bitcast <vscale x 1 x i16> %op2 to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %0
+}