diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13725,8 +13725,10 @@
     return SDValue();
 
   SDValue Op = N->getOperand(0);
-  if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
-      Op.getOpcode() != ISD::FMUL)
+  if (!Op.getValueType().isSimple() || Op.getOpcode() != ISD::FMUL)
+    return SDValue();
+
+  if (!Op.getValueType().is64BitVector() && !Op.getValueType().is128BitVector())
     return SDValue();
 
   SDValue ConstVec = Op->getOperand(1);
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Ensure we don't crash when trying to combine fp<->int conversions
+define void @fp_convert_combine_crash(<8 x float> *%a, <8 x i32> *%b) #0 {
+; CHECK-LABEL: fp_convert_combine_crash:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    fmov z1.s, #8.00000000
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
+; CHECK-NEXT:    fcvtzs z0.s, p0/m, z0.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
+  %f = load <8 x float>, <8 x float>* %a
+  %mul.i = fmul <8 x float> %f, <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00>
+  %vcvt.i = fptosi <8 x float> %mul.i to <8 x i32>
+  store <8 x i32> %vcvt.i, <8 x i32>* %b
+  ret void
+}
+
+attributes #0 = { vscale_range(2,2) "target-features"="+sve" }
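
Note: the combine guarded here folds fptosi(fmul(x, 2^n)) into a single fixed-point fcvtzs with n fractional bits, a pattern that only exists for 64- and 128-bit NEON vectors. With +sve and vscale_range(2,2), the <8 x float> in the test is a 256-bit fixed-length vector lowered via SVE, so letting it reach the NEON-only lowering crashed; the new is64BitVector()/is128BitVector() check rejects it while still permitting NEON-sized inputs. As a hypothetical illustration (the function name and expected assembly below are assumptions, not part of the patch), a 128-bit input should still combine:

define <4 x i32> @neon_sized_combine(<4 x float> %x) {
  ; fptosi(x * 8.0) == fcvtzs(x, #3), since 8.0 is 2^3; the combine
  ; should still rewrite this into a fixed-point conversion, e.g.
  ;   fcvtzs v0.4s, v0.4s, #3
  %mul = fmul <4 x float> %x, <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00>
  %cvt = fptosi <4 x float> %mul to <4 x i32>
  ret <4 x i32> %cvt
}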