Index: llvm/lib/Target/ARM/ARMISelLowering.h =================================================================== --- llvm/lib/Target/ARM/ARMISelLowering.h +++ llvm/lib/Target/ARM/ARMISelLowering.h @@ -213,6 +213,12 @@ VADDLVu, VADDLVAs, VADDLVAu, + VMLAVs, + VMLAVu, + VMLALVs, + VMLALVu, + VMLALVAs, + VMLALVAu, SMULWB, // Signed multiply word by half word, bottom SMULWT, // Signed multiply word by half word, top Index: llvm/lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- llvm/lib/Target/ARM/ARMISelLowering.cpp +++ llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -1666,6 +1666,12 @@ case ARMISD::VADDLVu: return "ARMISD::VADDLVu"; case ARMISD::VADDLVAs: return "ARMISD::VADDLVAs"; case ARMISD::VADDLVAu: return "ARMISD::VADDLVAu"; + case ARMISD::VMLAVs: return "ARMISD::VMLAVs"; + case ARMISD::VMLAVu: return "ARMISD::VMLAVu"; + case ARMISD::VMLALVs: return "ARMISD::VMLALVs"; + case ARMISD::VMLALVu: return "ARMISD::VMLALVu"; + case ARMISD::VMLALVAs: return "ARMISD::VMLALVAs"; + case ARMISD::VMLALVAu: return "ARMISD::VMLALVAu"; case ARMISD::UMAAL: return "ARMISD::UMAAL"; case ARMISD::UMLAL: return "ARMISD::UMLAL"; case ARMISD::SMLAL: return "ARMISD::SMLAL"; @@ -11814,8 +11820,13 @@ SDValue Hi = DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA, DCI.DAG.getConstant(1, dl, MVT::i32)); SDValue Red = - DCI.DAG.getNode(OpcodeA, dl, DCI.DAG.getVTList({MVT::i32, MVT::i32}), - Lo, Hi, VecRed->getOperand(0)); + VecRed->getNumOperands() == 1 + ? DCI.DAG.getNode(OpcodeA, dl, + DCI.DAG.getVTList({MVT::i32, MVT::i32}), Lo, Hi, + VecRed->getOperand(0)) + : DCI.DAG.getNode(OpcodeA, dl, + DCI.DAG.getVTList({MVT::i32, MVT::i32}), Lo, Hi, + VecRed->getOperand(0), VecRed->getOperand(1)); return DCI.DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Red, SDValue(Red.getNode(), 1)); }; @@ -11828,6 +11839,14 @@ return M; if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0)) return M; + if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1)) + return M; + if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1)) + return M; + if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0)) + return M; + if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0)) + return M; return SDValue(); } @@ -14019,10 +14038,17 @@ // We are looking for something that will have illegal types if left alone, // but that we can convert to a single instruction under MVE.
For example // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A + // or + // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B // Cases: // VADDV u/s 8/16/32 + // VMLAV u/s 8/16/32 // VADDLV u/s 32 + // VMLALV u/s 16/32 + // TODO: + // VMLSV + // VMLSLV auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) { if (ResVT != RetTy || N0->getOpcode() != ExtendCode) @@ -14032,6 +14058,20 @@ return A; return SDValue(); }; + auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, SDValue &A, SDValue &B) { + if (ResVT != RetTy || N0->getOpcode() != ISD::MUL) + return false; + SDValue ExtA = N0->getOperand(0); + SDValue ExtB = N0->getOperand(1); + if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) + return false; + A = ExtA->getOperand(0); + B = ExtB->getOperand(0); + if (A.getValueType() == B.getValueType() && + llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; })) + return true; + return false; + }; auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) { SDValue Node = DAG.getNode(Opcode, dl, {MVT::i32, MVT::i32}, Ops); return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Node, SDValue(Node.getNode(), 1)); }; @@ -14046,6 +14086,15 @@ if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32})) return Create64bitNode(ARMISD::VADDLVu, {A}); + SDValue A, B; + if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) + return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B); + if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) + return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B); + if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B)) + return Create64bitNode(ARMISD::VMLALVs, {A, B}); + if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B)) + return Create64bitNode(ARMISD::VMLALVu, {A, B}); return SDValue(); } Index: llvm/lib/Target/ARM/ARMInstrMVE.td =================================================================== --- llvm/lib/Target/ARM/ARMInstrMVE.td +++ llvm/lib/Target/ARM/ARMInstrMVE.td @@ -942,6 +942,47 @@ defm MVE_VMLSDAV : MVE_VMLSDAV_multi; defm MVE_VMLSDAV : MVE_VMLSDAV_multi; +def SDTVecReduce2 : SDTypeProfile<1, 2, [ // VMLAV + SDTCisInt<0>, SDTCisVec<1>, SDTCisVec<2> +]>; +def SDTVecReduce2L : SDTypeProfile<2, 2, [ // VMLALV + SDTCisInt<0>, SDTCisInt<1>, SDTCisVec<2>, SDTCisVec<3> +]>; +def SDTVecReduce2LA : SDTypeProfile<2, 4, [ // VMLALVA + SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>, SDTCisInt<3>, + SDTCisVec<4>, SDTCisVec<5> +]>; +def ARMVMLAVs : SDNode<"ARMISD::VMLAVs", SDTVecReduce2>; +def ARMVMLAVu : SDNode<"ARMISD::VMLAVu", SDTVecReduce2>; +def ARMVMLALVs : SDNode<"ARMISD::VMLALVs", SDTVecReduce2L>; +def ARMVMLALVu : SDNode<"ARMISD::VMLALVu", SDTVecReduce2L>; +def ARMVMLALVAs : SDNode<"ARMISD::VMLALVAs", SDTVecReduce2LA>; +def ARMVMLALVAu : SDNode<"ARMISD::VMLALVAu", SDTVecReduce2LA>; + +let Predicates = [HasMVEInt] in { + def : Pat<(i32 (vecreduce_add (mul (v4i32 MQPR:$src1), (v4i32 MQPR:$src2)))), + (i32 (MVE_VMLADAVu32 $src1, $src2))>; + def : Pat<(i32 (ARMVMLAVs (v16i8 MQPR:$val1), (v16i8 MQPR:$val2))), + (i32 (MVE_VMLADAVs8 (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; + def : Pat<(i32 (ARMVMLAVu (v16i8 MQPR:$val1), (v16i8 MQPR:$val2))), + (i32 (MVE_VMLADAVu8 (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; + def : Pat<(i32 (ARMVMLAVs (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))), + (i32 (MVE_VMLADAVs16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; + def : Pat<(i32 (ARMVMLAVu (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))), + (i32
(MVE_VMLADAVu16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; + + def : Pat<(i32 (add (i32 (vecreduce_add (mul (v4i32 MQPR:$src1), (v4i32 MQPR:$src2)))), (i32 tGPR:$src3))), + (i32 (MVE_VMLADAVau32 $src3, $src1, $src2))>; + def : Pat<(i32 (add (ARMVMLAVs (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)), GPR:$Rd)), + (i32 (MVE_VMLADAVas8 GPR:$Rd, (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; + def : Pat<(i32 (add (ARMVMLAVu (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)), GPR:$Rd)), + (i32 (MVE_VMLADAVau8 GPR:$Rd, (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; + def : Pat<(i32 (add (ARMVMLAVs (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), GPR:$Rd)), + (i32 (MVE_VMLADAVas16 GPR:$Rd, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; + def : Pat<(i32 (add (ARMVMLAVu (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), GPR:$Rd)), + (i32 (MVE_VMLADAVau16 GPR:$Rd, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; +} + // vmlav aliases vmladav foreach acc = ["", "a"] in { foreach suffix = ["s8", "s16", "s32", "u8", "u16", "u32"] in { @@ -1033,6 +1074,26 @@ defm MVE_VMLALDAV : MVE_VMLALDAV_multi<"16", 0b0>; defm MVE_VMLALDAV : MVE_VMLALDAV_multi<"32", 0b1>; +let Predicates = [HasMVEInt] in { + def : Pat<(ARMVMLALVs (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)), + (MVE_VMLALDAVs32 (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))>; + def : Pat<(ARMVMLALVu (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)), + (MVE_VMLALDAVu32 (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))>; + def : Pat<(ARMVMLALVs (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), + (MVE_VMLALDAVs16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))>; + def : Pat<(ARMVMLALVu (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), + (MVE_VMLALDAVu16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))>; + + def : Pat<(ARMVMLALVAs GPR:$Rda, GPR:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)), + (MVE_VMLALDAVas32 GPR:$Rda, GPR:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))>; + def : Pat<(ARMVMLALVAu GPR:$Rda, GPR:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)), + (MVE_VMLALDAVau32 GPR:$Rda, GPR:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))>; + def : Pat<(ARMVMLALVAs GPR:$Rda, GPR:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), + (MVE_VMLALDAVas16 GPR:$Rda, GPR:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))>; + def : Pat<(ARMVMLALVAu GPR:$Rda, GPR:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), + (MVE_VMLALDAVau16 GPR:$Rda, GPR:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))>; +} + // vmlalv aliases vmlaldav foreach acc = ["", "a"] in { foreach suffix = ["s16", "s32", "u16", "u32"] in { Index: llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll =================================================================== --- llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll +++ llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll @@ -4,8 +4,7 @@ define arm_aapcs_vfpcc i32 @add_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: add_v4i32_v4i32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddv.u32 r0, q0 +; CHECK-NEXT: vmlav.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %m = mul <4 x i32> %x, %y @@ -16,50 +15,8 @@ define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_zext(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: add_v4i32_v4i64_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vmov.f32 s8, s4 -; CHECK-NEXT: vmov.f32 s12, s0 -; CHECK-NEXT: vmov.f32 s10, s5 -; CHECK-NEXT: vmov.f32 s14, s1 -; CHECK-NEXT: vmov r0, s8 -; CHECK-NEXT: vmov r1, s12 -; CHECK-NEXT: umull r0, r1, r1, r0 -; CHECK-NEXT: vmov.32 q4[0], r0 -; CHECK-NEXT: vmov r0, s10 -; CHECK-NEXT: vmov.32 q4[1], r1 -; CHECK-NEXT: vmov r1, s14 -; CHECK-NEXT: vmov.f32 s8, s6 -;
CHECK-NEXT: vmov.f32 s12, s2 -; CHECK-NEXT: vmov.f32 s10, s7 -; CHECK-NEXT: vmov.f32 s14, s3 -; CHECK-NEXT: umull r0, r1, r1, r0 -; CHECK-NEXT: vmov.32 q4[2], r0 -; CHECK-NEXT: vmov.32 q4[3], r1 -; CHECK-NEXT: vmov r0, s18 -; CHECK-NEXT: vmov r3, s16 -; CHECK-NEXT: vmov r2, s17 -; CHECK-NEXT: adds r0, r0, r3 -; CHECK-NEXT: adcs r1, r2 -; CHECK-NEXT: adr r2, .LCPI1_0 -; CHECK-NEXT: vldrw.u32 q1, [r2] -; CHECK-NEXT: vand q2, q2, q1 -; CHECK-NEXT: vand q0, q3, q1 -; CHECK-NEXT: vmov r2, s8 -; CHECK-NEXT: vmov r3, s0 -; CHECK-NEXT: umlal r0, r1, r3, r2 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: vmov r3, s2 -; CHECK-NEXT: umlal r0, r1, r3, r2 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vmlalv.u32 r0, r1, q0, q1 ; CHECK-NEXT: bx lr -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: @ %bb.1: -; CHECK-NEXT: .LCPI1_0: -; CHECK-NEXT: .long 4294967295 @ 0xffffffff -; CHECK-NEXT: .long 0 @ 0x0 -; CHECK-NEXT: .long 4294967295 @ 0xffffffff -; CHECK-NEXT: .long 0 @ 0x0 entry: %xx = zext <4 x i32> %x to <4 x i64> %yy = zext <4 x i32> %y to <4 x i64> @@ -71,38 +28,7 @@ define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_sext(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: add_v4i32_v4i64_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vmov.f32 s8, s4 -; CHECK-NEXT: vmov.f32 s12, s0 -; CHECK-NEXT: vmov.f32 s10, s5 -; CHECK-NEXT: vmov.f32 s14, s1 -; CHECK-NEXT: vmov r0, s8 -; CHECK-NEXT: vmov r1, s12 -; CHECK-NEXT: smull r0, r1, r1, r0 -; CHECK-NEXT: vmov.32 q4[0], r0 -; CHECK-NEXT: vmov r0, s10 -; CHECK-NEXT: vmov.32 q4[1], r1 -; CHECK-NEXT: vmov r1, s14 -; CHECK-NEXT: vmov.f32 s8, s6 -; CHECK-NEXT: vmov.f32 s10, s7 -; CHECK-NEXT: vmov.f32 s4, s2 -; CHECK-NEXT: vmov.f32 s6, s3 -; CHECK-NEXT: smull r0, r1, r1, r0 -; CHECK-NEXT: vmov.32 q4[2], r0 -; CHECK-NEXT: vmov.32 q4[3], r1 -; CHECK-NEXT: vmov r0, s18 -; CHECK-NEXT: vmov r3, s16 -; CHECK-NEXT: vmov r2, s17 -; CHECK-NEXT: adds r0, r0, r3 -; CHECK-NEXT: vmov r3, s4 -; CHECK-NEXT: adcs r1, r2 -; CHECK-NEXT: vmov r2, s8 -; CHECK-NEXT: smlal r0, r1, r3, r2 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: vmov r3, s6 -; CHECK-NEXT: smlal r0, r1, r3, r2 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vmlalv.s32 r0, r1, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <4 x i32> %x to <4 x i64> @@ -151,46 +77,7 @@ define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_zext(<8 x i16> %x, <8 x i16> %y) { ; CHECK-LABEL: add_v8i16_v8i32_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u16 r0, q1[4] -; CHECK-NEXT: vmov.32 q2[0], r0 -; CHECK-NEXT: vmov.u16 r0, q1[5] -; CHECK-NEXT: vmov.32 q2[1], r0 -; CHECK-NEXT: vmov.u16 r0, q1[6] -; CHECK-NEXT: vmov.32 q2[2], r0 -; CHECK-NEXT: vmov.u16 r0, q1[7] -; CHECK-NEXT: vmov.32 q2[3], r0 -; CHECK-NEXT: vmov.u16 r0, q0[4] -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u16 r0, q0[5] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u16 r0, q0[6] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u16 r0, q0[7] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmovlb.u16 q2, q2 -; CHECK-NEXT: vmovlb.u16 q3, q3 -; CHECK-NEXT: vmov.u16 r0, q1[0] -; CHECK-NEXT: vmul.i32 q2, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u16 r0, q1[1] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u16 r0, q1[2] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u16 r0, q1[3] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmov.u16 r0, q0[0] -; CHECK-NEXT: vmovlb.u16 q1, q3 -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u16 r0, q0[1] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u16 r0, 
q0[2] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u16 r0, q0[3] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmovlb.u16 q0, q3 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vadd.i32 q0, q0, q2 -; CHECK-NEXT: vaddv.u32 r0, q0 +; CHECK-NEXT: vmlav.u16 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = zext <8 x i16> %x to <8 x i32> @@ -203,46 +90,7 @@ define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_sext(<8 x i16> %x, <8 x i16> %y) { ; CHECK-LABEL: add_v8i16_v8i32_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u16 r0, q1[4] -; CHECK-NEXT: vmov.32 q2[0], r0 -; CHECK-NEXT: vmov.u16 r0, q1[5] -; CHECK-NEXT: vmov.32 q2[1], r0 -; CHECK-NEXT: vmov.u16 r0, q1[6] -; CHECK-NEXT: vmov.32 q2[2], r0 -; CHECK-NEXT: vmov.u16 r0, q1[7] -; CHECK-NEXT: vmov.32 q2[3], r0 -; CHECK-NEXT: vmov.u16 r0, q0[4] -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u16 r0, q0[5] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u16 r0, q0[6] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u16 r0, q0[7] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmovlb.s16 q2, q2 -; CHECK-NEXT: vmovlb.s16 q3, q3 -; CHECK-NEXT: vmov.u16 r0, q1[0] -; CHECK-NEXT: vmul.i32 q2, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u16 r0, q1[1] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u16 r0, q1[2] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u16 r0, q1[3] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmov.u16 r0, q0[0] -; CHECK-NEXT: vmovlb.s16 q1, q3 -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u16 r0, q0[1] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u16 r0, q0[2] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u16 r0, q0[3] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmovlb.s16 q0, q3 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vadd.i32 q0, q0, q2 -; CHECK-NEXT: vaddv.u32 r0, q0 +; CHECK-NEXT: vmlav.s16 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <8 x i16> %x to <8 x i32> @@ -257,8 +105,7 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmovlb.u16 q1, q1 ; CHECK-NEXT: vmovlb.u16 q0, q0 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddv.u32 r0, q0 +; CHECK-NEXT: vmlav.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = zext <4 x i16> %x to <4 x i32> @@ -273,8 +120,7 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmovlb.s16 q1, q1 ; CHECK-NEXT: vmovlb.s16 q0, q0 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddv.u32 r0, q0 +; CHECK-NEXT: vmlav.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <4 x i16> %x to <4 x i32> @@ -299,114 +145,8 @@ define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_zext(<8 x i16> %x, <8 x i16> %y) { ; CHECK-LABEL: add_v8i16_v8i64_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-NEXT: vpush {d8, d9, d10, d11} -; CHECK-NEXT: vmov.u16 r0, q1[0] -; CHECK-NEXT: vmov.u16 r1, q0[0] -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u16 r0, q1[1] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: adr r0, .LCPI10_0 -; CHECK-NEXT: vldrw.u32 q2, [r0] -; CHECK-NEXT: vmov.32 q4[0], r1 -; CHECK-NEXT: vmov.u16 r1, q0[1] -; CHECK-NEXT: vmov.32 q4[2], r1 -; CHECK-NEXT: vand q3, q3, q2 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vmov r0, s12 -; CHECK-NEXT: vmov r1, s16 -; CHECK-NEXT: umull r0, r1, r1, r0 -; CHECK-NEXT: vmov.32 q5[0], r0 -; CHECK-NEXT: vmov r0, s14 -; CHECK-NEXT: vmov.32 q5[1], r1 -; CHECK-NEXT: vmov r1, s18 -; CHECK-NEXT: umull r0, r1, r1, r0 -; CHECK-NEXT: vmov.32 q5[2], r0 -; CHECK-NEXT: vmov.32 q5[3], r1 -; CHECK-NEXT: vmov r0, s22 -; CHECK-NEXT: vmov r3, s20 -; CHECK-NEXT: vmov r2, s21 -;
CHECK-NEXT: adds.w r12, r3, r0 -; CHECK-NEXT: vmov.u16 r3, q0[2] -; CHECK-NEXT: adcs r1, r2 -; CHECK-NEXT: vmov.u16 r2, q1[2] -; CHECK-NEXT: vmov.32 q3[0], r2 -; CHECK-NEXT: vmov.u16 r2, q1[3] -; CHECK-NEXT: vmov.32 q4[0], r3 -; CHECK-NEXT: vmov.u16 r3, q0[3] -; CHECK-NEXT: vmov.32 q3[2], r2 -; CHECK-NEXT: vmov.32 q4[2], r3 -; CHECK-NEXT: vand q3, q3, q2 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vmov r2, s12 -; CHECK-NEXT: vmov r3, s16 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q5[0], r2 -; CHECK-NEXT: vmov r2, s14 -; CHECK-NEXT: vmov.32 q5[1], r3 -; CHECK-NEXT: vmov r3, s18 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q5[2], r2 -; CHECK-NEXT: vmov.32 q5[3], r3 -; CHECK-NEXT: vmov r0, s20 -; CHECK-NEXT: vmov r2, s21 -; CHECK-NEXT: adds.w r0, r0, r12 -; CHECK-NEXT: adcs r1, r2 -; CHECK-NEXT: vmov r2, s22 -; CHECK-NEXT: adds.w r12, r0, r2 -; CHECK-NEXT: vmov.u16 r2, q1[4] -; CHECK-NEXT: adcs r1, r3 -; CHECK-NEXT: vmov.u16 r3, q0[4] -; CHECK-NEXT: vmov.32 q3[0], r2 -; CHECK-NEXT: vmov.u16 r2, q1[5] -; CHECK-NEXT: vmov.32 q4[0], r3 -; CHECK-NEXT: vmov.u16 r3, q0[5] -; CHECK-NEXT: vmov.32 q3[2], r2 -; CHECK-NEXT: vmov.32 q4[2], r3 -; CHECK-NEXT: vand q3, q3, q2 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vmov r2, s12 -; CHECK-NEXT: vmov r3, s16 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q5[0], r2 -; CHECK-NEXT: vmov r2, s14 -; CHECK-NEXT: vmov.32 q5[1], r3 -; CHECK-NEXT: vmov r3, s18 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q5[2], r2 -; CHECK-NEXT: vmov.32 q5[3], r3 -; CHECK-NEXT: vmov r0, s20 -; CHECK-NEXT: vmov r2, s21 -; CHECK-NEXT: adds.w r0, r0, r12 -; CHECK-NEXT: adcs r1, r2 -; CHECK-NEXT: vmov r2, s22 -; CHECK-NEXT: adds r0, r0, r2 -; CHECK-NEXT: vmov.u16 r2, q1[6] -; CHECK-NEXT: vmov.32 q3[0], r2 -; CHECK-NEXT: vmov.u16 r2, q1[7] -; CHECK-NEXT: adcs r1, r3 -; CHECK-NEXT: vmov.32 q3[2], r2 -; CHECK-NEXT: vmov.u16 r3, q0[6] -; CHECK-NEXT: vand q1, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r3 -; CHECK-NEXT: vmov.u16 r3, q0[7] -; CHECK-NEXT: vmov.32 q3[2], r3 -; CHECK-NEXT: vmov r2, s4 -; CHECK-NEXT: vand q0, q3, q2 -; CHECK-NEXT: vmov r3, s0 -; CHECK-NEXT: umlal r0, r1, r3, r2 -; CHECK-NEXT: vmov r2, s6 -; CHECK-NEXT: vmov r3, s2 -; CHECK-NEXT: umlal r0, r1, r3, r2 -; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: vmlalv.u16 r0, r1, q0, q1 ; CHECK-NEXT: bx lr -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: @ %bb.1: -; CHECK-NEXT: .LCPI10_0: -; CHECK-NEXT: .long 65535 @ 0xffff -; CHECK-NEXT: .long 0 @ 0x0 -; CHECK-NEXT: .long 65535 @ 0xffff -; CHECK-NEXT: .long 0 @ 0x0 entry: %xx = zext <8 x i16> %x to <8 x i64> %yy = zext <8 x i16> %y to <8 x i64> @@ -418,77 +158,7 @@ define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_sext(<8 x i16> %x, <8 x i16> %y) { ; CHECK-LABEL: add_v8i16_v8i64_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u16 r0, q1[0] -; CHECK-NEXT: vmov.u16 r1, q0[0] -; CHECK-NEXT: sxth r0, r0 -; CHECK-NEXT: sxth r1, r1 -; CHECK-NEXT: smull r0, r1, r1, r0 -; CHECK-NEXT: vmov.32 q2[0], r0 -; CHECK-NEXT: vmov.u16 r0, q1[1] -; CHECK-NEXT: vmov.32 q2[1], r1 -; CHECK-NEXT: vmov.u16 r1, q0[1] -; CHECK-NEXT: sxth r0, r0 -; CHECK-NEXT: sxth r1, r1 -; CHECK-NEXT: smull r0, r1, r1, r0 -; CHECK-NEXT: vmov.32 q2[2], r0 -; CHECK-NEXT: vmov.32 q2[3], r1 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: vmov r3, s8 -; CHECK-NEXT: vmov r0, s9 -; CHECK-NEXT: adds r2, r2, r3 -; CHECK-NEXT: vmov.u16 r3, q0[2] -; CHECK-NEXT: adc.w r12, r0, r1 -; CHECK-NEXT: vmov.u16 r1, q1[2] -; CHECK-NEXT: sxth r1, r1 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: 
smull r1, r3, r3, r1 -; CHECK-NEXT: vmov.32 q2[0], r1 -; CHECK-NEXT: vmov.u16 r1, q1[3] -; CHECK-NEXT: vmov.32 q2[1], r3 -; CHECK-NEXT: vmov.u16 r3, q0[3] -; CHECK-NEXT: sxth r1, r1 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: smull r1, r3, r3, r1 -; CHECK-NEXT: vmov.32 q2[2], r1 -; CHECK-NEXT: vmov.32 q2[3], r3 -; CHECK-NEXT: vmov r0, s8 -; CHECK-NEXT: vmov r1, s9 -; CHECK-NEXT: adds r0, r0, r2 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: adc.w r1, r1, r12 -; CHECK-NEXT: adds.w r12, r0, r2 -; CHECK-NEXT: vmov.u16 r2, q1[4] -; CHECK-NEXT: adcs r1, r3 -; CHECK-NEXT: vmov.u16 r3, q0[4] -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q2[0], r2 -; CHECK-NEXT: vmov.u16 r2, q1[5] -; CHECK-NEXT: vmov.32 q2[1], r3 -; CHECK-NEXT: vmov.u16 r3, q0[5] -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q2[2], r2 -; CHECK-NEXT: vmov.32 q2[3], r3 -; CHECK-NEXT: vmov r0, s8 -; CHECK-NEXT: vmov r2, s9 -; CHECK-NEXT: adds.w r0, r0, r12 -; CHECK-NEXT: adcs r1, r2 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: adds r0, r0, r2 -; CHECK-NEXT: vmov.u16 r2, q1[6] -; CHECK-NEXT: adcs r1, r3 -; CHECK-NEXT: vmov.u16 r3, q0[6] -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: smlal r0, r1, r3, r2 -; CHECK-NEXT: vmov.u16 r2, q1[7] -; CHECK-NEXT: vmov.u16 r3, q0[7] -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: smlal r0, r1, r3, r2 +; CHECK-NEXT: vmlalv.s16 r0, r1, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <8 x i16> %x to <8 x i64> @@ -552,90 +222,7 @@ define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_zext(<16 x i8> %x, <16 x i8> %y) { ; CHECK-LABEL: add_v16i8_v16i32_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-NEXT: vpush {d8, d9, d10, d11} -; CHECK-NEXT: vmov.u8 r0, q1[12] -; CHECK-NEXT: vmov.i32 q2, #0xff -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[13] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[14] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[15] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.32 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.32 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.32 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.32 q4[3], r0 -; CHECK-NEXT: vand q3, q3, q2 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vmov.u8 r0, q1[4] -; CHECK-NEXT: vmul.i32 q3, q4, q3 -; CHECK-NEXT: vmov.32 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[5] -; CHECK-NEXT: vmov.32 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[6] -; CHECK-NEXT: vmov.32 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[7] -; CHECK-NEXT: vmov.32 q4[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; CHECK-NEXT: vmov.32 q5[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.32 q5[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.32 q5[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.32 q5[3], r0 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vand q5, q5, q2 -; CHECK-NEXT: vmov.u8 r0, q1[8] -; CHECK-NEXT: vmul.i32 q4, q5, q4 -; CHECK-NEXT: vadd.i32 q3, q4, q3 -; CHECK-NEXT: vmov.32 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[9] -; CHECK-NEXT: vmov.32 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[10] -; CHECK-NEXT: vmov.32 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[11] -; CHECK-NEXT: vmov.32 q4[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.32 q5[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.32 
q5[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.32 q5[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.32 q5[3], r0 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vand q5, q5, q2 -; CHECK-NEXT: vmov.u8 r0, q1[0] -; CHECK-NEXT: vmul.i32 q4, q5, q4 -; CHECK-NEXT: vmov.32 q5[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[1] -; CHECK-NEXT: vmov.32 q5[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[2] -; CHECK-NEXT: vmov.32 q5[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[3] -; CHECK-NEXT: vmov.32 q5[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vand q1, q5, q2 -; CHECK-NEXT: vmov.32 q5[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmov.32 q5[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.32 q5[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.32 q5[3], r0 -; CHECK-NEXT: vand q0, q5, q2 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vadd.i32 q0, q0, q4 -; CHECK-NEXT: vadd.i32 q0, q0, q3 -; CHECK-NEXT: vaddv.u32 r0, q0 -; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: vmlav.u8 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = zext <16 x i8> %x to <16 x i32> @@ -648,97 +235,7 @@ define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_sext(<16 x i8> %x, <16 x i8> %y) { ; CHECK-LABEL: add_v16i8_v16i32_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vmov.u8 r0, q1[12] -; CHECK-NEXT: vmov.32 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[13] -; CHECK-NEXT: vmov.32 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[14] -; CHECK-NEXT: vmov.32 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[15] -; CHECK-NEXT: vmov.32 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmovlb.s8 q2, q2 -; CHECK-NEXT: vmovlb.s8 q3, q3 -; CHECK-NEXT: vmovlb.s16 q2, q2 -; CHECK-NEXT: vmovlb.s16 q3, q3 -; CHECK-NEXT: vmov.u8 r0, q1[4] -; CHECK-NEXT: vmul.i32 q2, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[5] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[6] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[7] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; CHECK-NEXT: vmov.32 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.32 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.32 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.32 q4[3], r0 -; CHECK-NEXT: vmovlb.s8 q3, q3 -; CHECK-NEXT: vmovlb.s8 q4, q4 -; CHECK-NEXT: vmovlb.s16 q3, q3 -; CHECK-NEXT: vmovlb.s16 q4, q4 -; CHECK-NEXT: vmov.u8 r0, q1[8] -; CHECK-NEXT: vmul.i32 q3, q4, q3 -; CHECK-NEXT: vadd.i32 q2, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[9] -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[10] -; CHECK-NEXT: vmov.32 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[11] -; CHECK-NEXT: vmov.32 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.32 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.32 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.32 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.32 q4[3], r0 -; CHECK-NEXT: vmovlb.s8 q3, q3 -; CHECK-NEXT: vmovlb.s8 q4, q4 -; CHECK-NEXT: vmovlb.s16 q3, q3 -; CHECK-NEXT: vmovlb.s16 q4, q4 -; CHECK-NEXT: vmov.u8 r0, q1[0] -; CHECK-NEXT: vmul.i32 q3, q4, q3 -; CHECK-NEXT: vmov.32 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, 
q1[1] -; CHECK-NEXT: vmov.32 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[2] -; CHECK-NEXT: vmov.32 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[3] -; CHECK-NEXT: vmov.32 q4[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vmovlb.s8 q1, q4 -; CHECK-NEXT: vmov.32 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmovlb.s16 q1, q1 -; CHECK-NEXT: vmov.32 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.32 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.32 q4[3], r0 -; CHECK-NEXT: vmovlb.s8 q0, q4 -; CHECK-NEXT: vmovlb.s16 q0, q0 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vadd.i32 q0, q0, q3 -; CHECK-NEXT: vadd.i32 q0, q0, q2 -; CHECK-NEXT: vaddv.u32 r0, q0 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vmlav.s8 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <16 x i8> %x to <16 x i32> @@ -754,8 +251,7 @@ ; CHECK-NEXT: vmov.i32 q2, #0xff ; CHECK-NEXT: vand q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddv.u32 r0, q0 +; CHECK-NEXT: vmlav.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = zext <4 x i8> %x to <4 x i32> @@ -772,8 +268,7 @@ ; CHECK-NEXT: vmovlb.s8 q0, q0 ; CHECK-NEXT: vmovlb.s16 q1, q1 ; CHECK-NEXT: vmovlb.s16 q0, q0 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddv.u32 r0, q0 +; CHECK-NEXT: vmlav.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <4 x i8> %x to <4 x i32> @@ -1471,8 +966,7 @@ define arm_aapcs_vfpcc i32 @add_v4i32_v4i32_acc(<4 x i32> %x, <4 x i32> %y, i32 %a) { ; CHECK-LABEL: add_v4i32_v4i32_acc: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddva.u32 r0, q0 +; CHECK-NEXT: vmlava.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %m = mul <4 x i32> %x, %y @@ -1484,54 +978,8 @@ define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_acc_zext(<4 x i32> %x, <4 x i32> %y, i64 %a) { ; CHECK-LABEL: add_v4i32_v4i64_acc_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r7, lr} -; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vmov.f32 s8, s4 -; CHECK-NEXT: vmov.f32 s12, s0 -; CHECK-NEXT: vmov.f32 s10, s5 -; CHECK-NEXT: vmov.f32 s14, s1 -; CHECK-NEXT: vmov r2, s8 -; CHECK-NEXT: vmov r3, s12 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q4[0], r2 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: vmov.32 q4[1], r3 -; CHECK-NEXT: vmov r3, s14 -; CHECK-NEXT: vmov.f32 s8, s6 -; CHECK-NEXT: vmov.f32 s12, s2 -; CHECK-NEXT: vmov.f32 s10, s7 -; CHECK-NEXT: vmov.f32 s14, s3 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q4[2], r2 -; CHECK-NEXT: vmov.32 q4[3], r3 -; CHECK-NEXT: vmov lr, s18 -; CHECK-NEXT: vmov r2, s16 -; CHECK-NEXT: vmov r12, s17 -; CHECK-NEXT: adds.w lr, lr, r2 -; CHECK-NEXT: adr r2, .LCPI29_0 -; CHECK-NEXT: vldrw.u32 q1, [r2] -; CHECK-NEXT: adc.w r3, r3, r12 -; CHECK-NEXT: vand q2, q2, q1 -; CHECK-NEXT: vand q0, q3, q1 -; CHECK-NEXT: vmov r12, s8 -; CHECK-NEXT: vmov r2, s0 -; CHECK-NEXT: umlal lr, r3, r2, r12 -; CHECK-NEXT: vmov r12, s10 -; CHECK-NEXT: vmov r2, s2 -; CHECK-NEXT: umlal lr, r3, r2, r12 -; CHECK-NEXT: adds.w r0, r0, lr -; CHECK-NEXT: adcs r1, r3 -; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop {r7, pc} -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: @ %bb.1: -; CHECK-NEXT: .LCPI29_0: -; CHECK-NEXT: .long 4294967295 @ 0xffffffff -; CHECK-NEXT: .long 0 @ 0x0 -; CHECK-NEXT: .long 4294967295 @ 0xffffffff -; CHECK-NEXT: .long 0 @ 0x0 +; CHECK-NEXT: vmlalva.u32 r0, r1, q0, q1 +; CHECK-NEXT: bx lr entry: %xx = zext <4 x i32> %x to <4 x i64> %yy = zext <4 x i32> %y to <4 x i64> @@ -1544,43 
+992,8 @@ define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_acc_sext(<4 x i32> %x, <4 x i32> %y, i64 %a) { ; CHECK-LABEL: add_v4i32_v4i64_acc_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r7, lr} -; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vmov.f32 s8, s4 -; CHECK-NEXT: vmov.f32 s12, s0 -; CHECK-NEXT: vmov.f32 s10, s5 -; CHECK-NEXT: vmov.f32 s14, s1 -; CHECK-NEXT: vmov r2, s8 -; CHECK-NEXT: vmov r3, s12 -; CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q4[0], r2 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: vmov.32 q4[1], r3 -; CHECK-NEXT: vmov r3, s14 -; CHECK-NEXT: vmov.f32 s8, s6 -; CHECK-NEXT: vmov.f32 s10, s7 -; CHECK-NEXT: vmov.f32 s4, s2 -; CHECK-NEXT: vmov.f32 s6, s3 -; CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q4[2], r2 -; CHECK-NEXT: vmov.32 q4[3], r3 -; CHECK-NEXT: vmov lr, s18 -; CHECK-NEXT: vmov r2, s16 -; CHECK-NEXT: vmov r12, s17 -; CHECK-NEXT: adds.w lr, lr, r2 -; CHECK-NEXT: vmov r2, s4 -; CHECK-NEXT: adc.w r3, r3, r12 -; CHECK-NEXT: vmov r12, s8 -; CHECK-NEXT: smlal lr, r3, r2, r12 -; CHECK-NEXT: vmov r12, s10 -; CHECK-NEXT: vmov r2, s6 -; CHECK-NEXT: smlal lr, r3, r2, r12 -; CHECK-NEXT: adds.w r0, r0, lr -; CHECK-NEXT: adcs r1, r3 -; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop {r7, pc} +; CHECK-NEXT: vmlalva.s32 r0, r1, q0, q1 +; CHECK-NEXT: bx lr entry: %xx = sext <4 x i32> %x to <4 x i64> %yy = sext <4 x i32> %y to <4 x i64> @@ -1639,46 +1052,7 @@ define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_acc_zext(<8 x i16> %x, <8 x i16> %y, i32 %a) { ; CHECK-LABEL: add_v8i16_v8i32_acc_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u16 r1, q1[4] -; CHECK-NEXT: vmov.32 q2[0], r1 -; CHECK-NEXT: vmov.u16 r1, q1[5] -; CHECK-NEXT: vmov.32 q2[1], r1 -; CHECK-NEXT: vmov.u16 r1, q1[6] -; CHECK-NEXT: vmov.32 q2[2], r1 -; CHECK-NEXT: vmov.u16 r1, q1[7] -; CHECK-NEXT: vmov.32 q2[3], r1 -; CHECK-NEXT: vmov.u16 r1, q0[4] -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u16 r1, q0[5] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u16 r1, q0[6] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u16 r1, q0[7] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmovlb.u16 q2, q2 -; CHECK-NEXT: vmovlb.u16 q3, q3 -; CHECK-NEXT: vmov.u16 r1, q1[0] -; CHECK-NEXT: vmul.i32 q2, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u16 r1, q1[1] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u16 r1, q1[2] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u16 r1, q1[3] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmov.u16 r1, q0[0] -; CHECK-NEXT: vmovlb.u16 q1, q3 -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u16 r1, q0[1] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u16 r1, q0[2] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u16 r1, q0[3] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmovlb.u16 q0, q3 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vadd.i32 q0, q0, q2 -; CHECK-NEXT: vaddva.u32 r0, q0 +; CHECK-NEXT: vmlava.u16 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = zext <8 x i16> %x to <8 x i32> @@ -1692,46 +1066,7 @@ define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_acc_sext(<8 x i16> %x, <8 x i16> %y, i32 %a) { ; CHECK-LABEL: add_v8i16_v8i32_acc_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u16 r1, q1[4] -; CHECK-NEXT: vmov.32 q2[0], r1 -; CHECK-NEXT: vmov.u16 r1, q1[5] -; CHECK-NEXT: vmov.32 q2[1], r1 -; CHECK-NEXT: vmov.u16 r1, q1[6] -; CHECK-NEXT: vmov.32 q2[2], r1 -; CHECK-NEXT: vmov.u16 r1, q1[7] -; CHECK-NEXT: vmov.32 q2[3], r1 -; CHECK-NEXT: vmov.u16 r1,
q0[4] -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u16 r1, q0[5] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u16 r1, q0[6] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u16 r1, q0[7] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmovlb.s16 q2, q2 -; CHECK-NEXT: vmovlb.s16 q3, q3 -; CHECK-NEXT: vmov.u16 r1, q1[0] -; CHECK-NEXT: vmul.i32 q2, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u16 r1, q1[1] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u16 r1, q1[2] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u16 r1, q1[3] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmov.u16 r1, q0[0] -; CHECK-NEXT: vmovlb.s16 q1, q3 -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u16 r1, q0[1] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u16 r1, q0[2] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u16 r1, q0[3] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmovlb.s16 q0, q3 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vadd.i32 q0, q0, q2 -; CHECK-NEXT: vaddva.u32 r0, q0 +; CHECK-NEXT: vmlava.s16 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <8 x i16> %x to <8 x i32> @@ -1747,8 +1082,7 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmovlb.u16 q1, q1 ; CHECK-NEXT: vmovlb.u16 q0, q0 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddva.u32 r0, q0 +; CHECK-NEXT: vmlava.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = zext <4 x i16> %x to <4 x i32> @@ -1764,8 +1098,7 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmovlb.s16 q1, q1 ; CHECK-NEXT: vmovlb.s16 q0, q0 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddva.u32 r0, q0 +; CHECK-NEXT: vmlava.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <4 x i16> %x to <4 x i32> @@ -1792,118 +1125,8 @@ define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_zext(<8 x i16> %x, <8 x i16> %y, i64 %a) { ; CHECK-LABEL: add_v8i16_v8i64_acc_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, lr} -; CHECK-NEXT: push {r4, lr} -; CHECK-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-NEXT: vpush {d8, d9, d10, d11} -; CHECK-NEXT: vmov.u16 r2, q1[0] -; CHECK-NEXT: vmov.u16 r3, q0[0] -; CHECK-NEXT: vmov.32 q3[0], r2 -; CHECK-NEXT: vmov.u16 r2, q1[1] -; CHECK-NEXT: vmov.32 q3[2], r2 -; CHECK-NEXT: adr r2, .LCPI38_0 -; CHECK-NEXT: vldrw.u32 q2, [r2] -; CHECK-NEXT: vmov.32 q4[0], r3 -; CHECK-NEXT: vmov.u16 r3, q0[1] -; CHECK-NEXT: vmov.32 q4[2], r3 -; CHECK-NEXT: vand q3, q3, q2 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vmov r2, s12 -; CHECK-NEXT: vmov r3, s16 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q5[0], r2 -; CHECK-NEXT: vmov r2, s14 -; CHECK-NEXT: vmov.32 q5[1], r3 -; CHECK-NEXT: vmov r3, s18 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q5[2], r2 -; CHECK-NEXT: vmov.32 q5[3], r3 -;
CHECK-NEXT: vmov r4, s20 -; CHECK-NEXT: vmov r2, s21 -; CHECK-NEXT: adds.w r4, r4, r12 -; CHECK-NEXT: adc.w r12, lr, r2 -; CHECK-NEXT: vmov r2, s22 -; CHECK-NEXT: adds.w lr, r4, r2 -; CHECK-NEXT: vmov.u16 r2, q1[4] -; CHECK-NEXT: adc.w r12, r12, r3 -; CHECK-NEXT: vmov.u16 r3, q0[4] -; CHECK-NEXT: vmov.32 q3[0], r2 -; CHECK-NEXT: vmov.u16 r2, q1[5] -; CHECK-NEXT: vmov.32 q4[0], r3 -; CHECK-NEXT: vmov.u16 r3, q0[5] -; CHECK-NEXT: vmov.32 q3[2], r2 -; CHECK-NEXT: vmov.32 q4[2], r3 -; CHECK-NEXT: vand q3, q3, q2 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vmov r2, s12 -; CHECK-NEXT: vmov r3, s16 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q5[0], r2 -; CHECK-NEXT: vmov r2, s14 -; CHECK-NEXT: vmov.32 q5[1], r3 -; CHECK-NEXT: vmov r3, s18 -; CHECK-NEXT: umull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q5[2], r2 -; CHECK-NEXT: vmov.32 q5[3], r3 -; CHECK-NEXT: vmov r4, s20 -; CHECK-NEXT: vmov r2, s21 -; CHECK-NEXT: adds.w r4, r4, lr -; CHECK-NEXT: adc.w r12, r12, r2 -; CHECK-NEXT: vmov r2, s22 -; CHECK-NEXT: adds r2, r2, r4 -; CHECK-NEXT: vmov.u16 r4, q1[6] -; CHECK-NEXT: vmov.32 q3[0], r4 -; CHECK-NEXT: vmov.u16 r4, q1[7] -; CHECK-NEXT: vmov.32 q3[2], r4 -; CHECK-NEXT: vmov.u16 r4, q0[6] -; CHECK-NEXT: vand q1, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r4 -; CHECK-NEXT: vmov.u16 r4, q0[7] -; CHECK-NEXT: adc.w r3, r3, r12 -; CHECK-NEXT: vmov.32 q3[2], r4 -; CHECK-NEXT: vmov r12, s4 -; CHECK-NEXT: vand q0, q3, q2 -; CHECK-NEXT: vmov r4, s0 -; CHECK-NEXT: umlal r2, r3, r4, r12 -; CHECK-NEXT: vmov r12, s6 -; CHECK-NEXT: vmov r4, s2 -; CHECK-NEXT: umlal r2, r3, r4, r12 -; CHECK-NEXT: adds r0, r0, r2 -; CHECK-NEXT: adcs r1, r3 -; CHECK-NEXT: vpop {d8, d9, d10, d11} -; CHECK-NEXT: pop {r4, pc} -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: @ %bb.1: -; CHECK-NEXT: .LCPI38_0: -; CHECK-NEXT: .long 65535 @ 0xffff -; CHECK-NEXT: .long 0 @ 0x0 -; CHECK-NEXT: .long 65535 @ 0xffff -; CHECK-NEXT: .long 0 @ 0x0 +; CHECK-NEXT: vmlalva.u16 r0, r1, q0, q1 +; CHECK-NEXT: bx lr entry: %xx = zext <8 x i16> %x to <8 x i64> %yy = zext <8 x i16> %y to <8 x i64> @@ -1916,82 +1139,8 @@ define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_sext(<8 x i16> %x, <8 x i16> %y, i64 %a) { ; CHECK-LABEL: add_v8i16_v8i64_acc_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, lr} -; CHECK-NEXT: push {r4, lr} -; CHECK-NEXT: vmov.u16 r2, q1[0] -; CHECK-NEXT: vmov.u16 r3, q0[0] -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q2[0], r2 -; CHECK-NEXT: vmov.u16 r2, q1[1] -; CHECK-NEXT: vmov.32 q2[1], r3 -; CHECK-NEXT: vmov.u16 r3, q0[1] -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q2[2], r2 -; CHECK-NEXT: vmov.32 q2[3], r3 -; CHECK-NEXT: vmov lr, s10 -; CHECK-NEXT: vmov r2, s8 -; CHECK-NEXT: vmov r12, s9 -; CHECK-NEXT: adds.w lr, lr, r2 -; CHECK-NEXT: vmov.u16 r2, q1[2] -; CHECK-NEXT: adc.w r12, r12, r3 -; CHECK-NEXT: vmov.u16 r3, q0[2] -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q2[0], r2 -; CHECK-NEXT: vmov.u16 r2, q1[3] -; CHECK-NEXT: vmov.32 q2[1], r3 -; CHECK-NEXT: vmov.u16 r3, q0[3] -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: vmov.32 q2[2], r2 -; CHECK-NEXT: vmov.32 q2[3], r3 -; CHECK-NEXT: vmov r4, s8 -; CHECK-NEXT: vmov r2, s9 -; CHECK-NEXT: adds.w r4, r4, lr -; CHECK-NEXT: adc.w r12, r12, r2 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: adds.w lr, r4, r2 -; CHECK-NEXT: 
vmov.u16 r4, q1[4] -; CHECK-NEXT: vmov.u16 r2, q0[4] -; CHECK-NEXT: sxth r4, r4 -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: adc.w r12, r12, r3 -; CHECK-NEXT: smull r2, r4, r2, r4 -; CHECK-NEXT: vmov.32 q2[0], r2 -; CHECK-NEXT: vmov.u16 r2, q1[5] -; CHECK-NEXT: vmov.32 q2[1], r4 -; CHECK-NEXT: vmov.u16 r4, q0[5] -; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: sxth r4, r4 -; CHECK-NEXT: smull r2, r4, r4, r2 -; CHECK-NEXT: vmov.32 q2[2], r2 -; CHECK-NEXT: vmov.32 q2[3], r4 -; CHECK-NEXT: vmov r3, s8 -; CHECK-NEXT: vmov r2, s9 -; CHECK-NEXT: adds.w r3, r3, lr -; CHECK-NEXT: adc.w r12, r12, r2 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: adds r2, r2, r3 -; CHECK-NEXT: adc.w r3, r12, r4 -; CHECK-NEXT: vmov.u16 r4, q1[6] -; CHECK-NEXT: sxth.w r12, r4 -; CHECK-NEXT: vmov.u16 r4, q0[6] -; CHECK-NEXT: sxth r4, r4 -; CHECK-NEXT: smlal r2, r3, r4, r12 -; CHECK-NEXT: vmov.u16 r4, q1[7] -; CHECK-NEXT: sxth.w r12, r4 -; CHECK-NEXT: vmov.u16 r4, q0[7] -; CHECK-NEXT: sxth r4, r4 -; CHECK-NEXT: smlal r2, r3, r4, r12 -; CHECK-NEXT: adds r0, r0, r2 -; CHECK-NEXT: adcs r1, r3 -; CHECK-NEXT: pop {r4, pc} +; CHECK-NEXT: vmlalva.s16 r0, r1, q0, q1 +; CHECK-NEXT: bx lr entry: %xx = sext <8 x i16> %x to <8 x i64> %yy = sext <8 x i16> %y to <8 x i64> @@ -2065,90 +1214,7 @@ define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_acc_zext(<16 x i8> %x, <16 x i8> %y, i32 %a) { ; CHECK-LABEL: add_v16i8_v16i32_acc_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-NEXT: vpush {d8, d9, d10, d11} -; CHECK-NEXT: vmov.u8 r1, q1[12] -; CHECK-NEXT: vmov.i32 q2, #0xff -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[13] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[14] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[15] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.32 q4[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.32 q4[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.32 q4[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.32 q4[3], r1 -; CHECK-NEXT: vand q3, q3, q2 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vmov.u8 r1, q1[4] -; CHECK-NEXT: vmul.i32 q3, q4, q3 -; CHECK-NEXT: vmov.32 q4[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[5] -; CHECK-NEXT: vmov.32 q4[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[6] -; CHECK-NEXT: vmov.32 q4[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[7] -; CHECK-NEXT: vmov.32 q4[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.32 q5[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.32 q5[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.32 q5[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.32 q5[3], r1 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vand q5, q5, q2 -; CHECK-NEXT: vmov.u8 r1, q1[8] -; CHECK-NEXT: vmul.i32 q4, q5, q4 -; CHECK-NEXT: vadd.i32 q3, q4, q3 -; CHECK-NEXT: vmov.32 q4[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[9] -; CHECK-NEXT: vmov.32 q4[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[10] -; CHECK-NEXT: vmov.32 q4[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[11] -; CHECK-NEXT: vmov.32 q4[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.32 q5[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.32 q5[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.32 q5[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.32 q5[3], r1 -; CHECK-NEXT: vand q4, q4, q2 -; CHECK-NEXT: vand q5, q5, q2 -; CHECK-NEXT: vmov.u8 r1, q1[0] -; CHECK-NEXT: vmul.i32 q4, q5, q4 -; CHECK-NEXT: vmov.32 q5[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[1] 
-; CHECK-NEXT: vmov.32 q5[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[2] -; CHECK-NEXT: vmov.32 q5[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[3] -; CHECK-NEXT: vmov.32 q5[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vand q1, q5, q2 -; CHECK-NEXT: vmov.32 q5[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: vmov.32 q5[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.32 q5[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.32 q5[3], r1 -; CHECK-NEXT: vand q0, q5, q2 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vadd.i32 q0, q0, q4 -; CHECK-NEXT: vadd.i32 q0, q0, q3 -; CHECK-NEXT: vaddva.u32 r0, q0 -; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: vmlava.u8 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = zext <16 x i8> %x to <16 x i32> @@ -2162,97 +1228,7 @@ define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_acc_sext(<16 x i8> %x, <16 x i8> %y, i32 %a) { ; CHECK-LABEL: add_v16i8_v16i32_acc_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vmov.u8 r1, q1[12] -; CHECK-NEXT: vmov.32 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[13] -; CHECK-NEXT: vmov.32 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[14] -; CHECK-NEXT: vmov.32 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[15] -; CHECK-NEXT: vmov.32 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmovlb.s8 q2, q2 -; CHECK-NEXT: vmovlb.s8 q3, q3 -; CHECK-NEXT: vmovlb.s16 q2, q2 -; CHECK-NEXT: vmovlb.s16 q3, q3 -; CHECK-NEXT: vmov.u8 r1, q1[4] -; CHECK-NEXT: vmul.i32 q2, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[5] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[6] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[7] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.32 q4[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.32 q4[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.32 q4[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.32 q4[3], r1 -; CHECK-NEXT: vmovlb.s8 q3, q3 -; CHECK-NEXT: vmovlb.s8 q4, q4 -; CHECK-NEXT: vmovlb.s16 q3, q3 -; CHECK-NEXT: vmovlb.s16 q4, q4 -; CHECK-NEXT: vmov.u8 r1, q1[8] -; CHECK-NEXT: vmul.i32 q3, q4, q3 -; CHECK-NEXT: vadd.i32 q2, q3, q2 -; CHECK-NEXT: vmov.32 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[9] -; CHECK-NEXT: vmov.32 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[10] -; CHECK-NEXT: vmov.32 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[11] -; CHECK-NEXT: vmov.32 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.32 q4[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.32 q4[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.32 q4[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.32 q4[3], r1 -; CHECK-NEXT: vmovlb.s8 q3, q3 -; CHECK-NEXT: vmovlb.s8 q4, q4 -; CHECK-NEXT: vmovlb.s16 q3, q3 -; CHECK-NEXT: vmovlb.s16 q4, q4 -; CHECK-NEXT: vmov.u8 r1, q1[0] -; CHECK-NEXT: vmul.i32 q3, q4, q3 -; CHECK-NEXT: vmov.32 q4[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[1] -; CHECK-NEXT: vmov.32 q4[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[2] -; CHECK-NEXT: vmov.32 q4[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[3] -; CHECK-NEXT: vmov.32 q4[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vmovlb.s8 q1, q4 -; CHECK-NEXT: vmov.32 q4[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: 
vmovlb.s16 q1, q1 -; CHECK-NEXT: vmov.32 q4[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.32 q4[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.32 q4[3], r1 -; CHECK-NEXT: vmovlb.s8 q0, q4 -; CHECK-NEXT: vmovlb.s16 q0, q0 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vadd.i32 q0, q0, q3 -; CHECK-NEXT: vadd.i32 q0, q0, q2 -; CHECK-NEXT: vaddva.u32 r0, q0 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vmlava.s8 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <16 x i8> %x to <16 x i32> @@ -2269,8 +1245,7 @@ ; CHECK-NEXT: vmov.i32 q2, #0xff ; CHECK-NEXT: vand q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddva.u32 r0, q0 +; CHECK-NEXT: vmlava.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = zext <4 x i8> %x to <4 x i32> @@ -2288,8 +1263,7 @@ ; CHECK-NEXT: vmovlb.s8 q0, q0 ; CHECK-NEXT: vmovlb.s16 q1, q1 ; CHECK-NEXT: vmovlb.s16 q0, q0 -; CHECK-NEXT: vmul.i32 q0, q0, q1 -; CHECK-NEXT: vaddva.u32 r0, q0 +; CHECK-NEXT: vmlava.u32 r0, q0, q1 ; CHECK-NEXT: bx lr entry: %xx = sext <4 x i8> %x to <4 x i32>
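For reference, a minimal standalone reproducer for the new 64-bit VMLALV path, mirroring the add_v8i16_v8i64_sext test above. This is a sketch only, not part of the patch: it assumes the llvm.experimental.vector.reduce.add intrinsic naming in use at the time, and an MVE llc configuration such as -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.

declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)

define arm_aapcs_vfpcc i64 @dot_v8i16(<8 x i16> %x, <8 x i16> %y) {
entry:
  ; widen both inputs so the multiply cannot wrap
  %xx = sext <8 x i16> %x to <8 x i64>
  %yy = sext <8 x i16> %y to <8 x i64>
  ; mul(sext, sext) feeding vecreduce_add is the shape IsVMLAV matches
  %m = mul <8 x i64> %xx, %yy
  %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %m)
  ret i64 %z
}

With the combine and patterns above, this should select to a single vmlalv.s16 r0, r1, q0, q1 followed by the return, rather than the long scalarized smull/smlal sequences deleted from the tests.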