Index: llvm/lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -8423,9 +8423,9 @@
                                  V128, v4f32, v8f16, OpNode>;
 }
 
-let mayRaiseFPException = 1, Uses = [FPCR] in
 multiclass SIMDFPIndexed<bit U, bits<4> opc, string asm,
                          SDPatternOperator OpNode> {
+  let mayRaiseFPException = 1, Uses = [FPCR] in {
   let Predicates = [HasNEON, HasFullFP16] in {
   def v4i16_indexed : BaseSIMDIndexed<0, U, 0, 0b00, opc,
                                       V64, V64,
@@ -8528,6 +8528,27 @@
     let Inst{11} = idx{0};
     let Inst{21} = 0;
   }
+  } // mayRaiseFPException = 1, Uses = [FPCR]
+
+  let Predicates = [HasNEON, HasFullFP16] in {
+  def : Pat<(f16 (OpNode
+                   (f16 (vector_extract (v8f16 V128:$Rn), (i64 0))),
+                   (f16 (vector_extract (v8f16 V128:$Rm), VectorIndexH:$idx)))),
+            (!cast<Instruction>(NAME # v1i16_indexed)
+              (EXTRACT_SUBREG V128:$Rn, hsub), V128:$Rm, VectorIndexH:$idx)>;
+  }
+
+  def : Pat<(f32 (OpNode
+                   (f32 (vector_extract (v4f32 V128:$Rn), (i64 0))),
+                   (f32 (vector_extract (v4f32 V128:$Rm), VectorIndexS:$idx)))),
+            (!cast<Instruction>(NAME # v1i32_indexed)
+              (EXTRACT_SUBREG V128:$Rn, ssub), V128:$Rm, VectorIndexS:$idx)>;
+
+  def : Pat<(f64 (OpNode
+                   (f64 (vector_extract (v2f64 V128:$Rn), (i64 0))),
+                   (f64 (vector_extract (v2f64 V128:$Rm), VectorIndexD:$idx)))),
+            (!cast<Instruction>(NAME # v1i64_indexed)
+              (EXTRACT_SUBREG V128:$Rn, dsub), V128:$Rm, VectorIndexD:$idx)>;
 }
 
 multiclass SIMDFPIndexedTiedPatterns<string INST, SDPatternOperator OpNode> {
Index: llvm/lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4437,6 +4437,19 @@
 }
 defm FSUB : TwoOperandFPData<0b0011, "fsub", any_fsub>;
 
+// Match scalar FMUL instead of indexed FMUL when extracting lane 0.
+let Predicates = [HasFullFP16] in {
+def : Pat<(f16 (any_fmul (f16 FPR16:$Rn),
+                         (f16 (vector_extract (v8f16 V128:$Rm), (i64 0))))),
+          (FMULHrr FPR16:$Rn, (EXTRACT_SUBREG V128:$Rm, hsub))>;
+}
+def : Pat<(f32 (any_fmul (f32 FPR32:$Rn),
+                         (f32 (vector_extract (v4f32 V128:$Rm), (i64 0))))),
+          (FMULSrr FPR32:$Rn, (EXTRACT_SUBREG V128:$Rm, ssub))>;
+def : Pat<(f64 (any_fmul (f64 FPR64:$Rn),
+                         (f64 (vector_extract (v2f64 V128:$Rm), (i64 0))))),
+          (FMULDrr FPR64:$Rn, (EXTRACT_SUBREG V128:$Rm, dsub))>;
+
 // Match reassociated forms of FNMUL.
 def : Pat<(fmul (fneg FPR16:$a), (f16 FPR16:$b)),
           (FNMULHrr FPR16:$a, FPR16:$b)>,
@@ -5214,6 +5227,23 @@
 defm FCMGE : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
 defm FCMGT : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
 defm FMULX : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx, HasNEONorSME>;
+// Match scalar FMULX instead of indexed FMULX when extracting lane 0.
+let Predicates = [HasNEON, HasFullFP16] in {
+def : Pat<(f16 (int_aarch64_neon_fmulx
+                 (f16 FPR16:$Rn),
+                 (f16 (vector_extract (v8f16 V128:$Rm), (i64 0))))),
+          (FMULX16 FPR16:$Rn, (EXTRACT_SUBREG V128:$Rm, hsub))>;
+}
+let Predicates = [HasNEON] in {
+def : Pat<(f32 (int_aarch64_neon_fmulx
+                 (f32 FPR32:$Rn),
+                 (f32 (vector_extract (v4f32 V128:$Rm), (i64 0))))),
+          (FMULX32 FPR32:$Rn, (EXTRACT_SUBREG V128:$Rm, ssub))>;
+def : Pat<(f64 (int_aarch64_neon_fmulx
+                 (f64 FPR64:$Rn),
+                 (f64 (vector_extract (v2f64 V128:$Rm), (i64 0))))),
+          (FMULX64 FPR64:$Rn, (EXTRACT_SUBREG V128:$Rm, dsub))>;
+}
 defm FRECPS : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps, HasNEONorSME>;
 defm FRSQRTS : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts, HasNEONorSME>;
 defm SQADD : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
Index: llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
===================================================================
--- llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
+++ llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
@@ -17,7 +17,7 @@
 ; CHECK-LABEL: %for.body
 ; CHECK: fmla.2d {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 ; CHECK: fmla.2d {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
-; CHECK: fmla.d {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}[0]
+; CHECK: fmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
 for.body: ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -59,7 +59,7 @@
 ; CHECK-LABEL: %for.body
 ; CHECK: fmla.2s {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 ; CHECK: fmla.2s {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
-; CHECK: fmla.s {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}[0]
+; CHECK: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
 for.body: ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
Index: llvm/test/CodeGen/AArch64/arm64-fml-combines.ll
===================================================================
--- llvm/test/CodeGen/AArch64/arm64-fml-combines.ll
+++ llvm/test/CodeGen/AArch64/arm64-fml-combines.ll
@@ -10,7 +10,7 @@
 ; CHECK-LABEL: %for.body
 ; CHECK: fmls.2d {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 ; CHECK: fmls.2d {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
-; CHECK: fmls.d {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}[0]
+; CHECK: fmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
 for.body: ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = sub nuw nsw i64 %indvars.iv, 1
@@ -52,7 +52,7 @@
 ; CHECK-LABEL: %for.body
 ; CHECK: fmls.2s {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 ; CHECK: fmls.2s {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
-; CHECK: fmls.s {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}[0]
+; CHECK: fmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
 for.body: ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
Index: llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll
===================================================================
--- llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll
+++ llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll
@@ -3560,7 +3560,7 @@
 define <1 x double> @test_vmul_laneq_f64_0(<1 x double> %a, <2 x double> %v) {
 ; CHECK-LABEL: test_vmul_laneq_f64_0:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmul d0, d0, v1.d[0]
+; CHECK-NEXT: fmul d0, d0, d1
 ; CHECK-NEXT: ret
 entry:
   %0 = bitcast <1 x double> %a to <8 x i8>
@@ -3715,3 +3715,44 @@
   %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
   ret <2 x float> %0
 }
+
+declare double @llvm.aarch64.neon.fmulx.f64(double, double)
+
+define <1 x double> @test_vmulx_lane_f64(<1 x double> %a, <1 x double> %v) {
+; CHECK-LABEL: test_vmulx_lane_f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmulx d0, d0, d1
+; CHECK-NEXT: ret
+entry:
+  %vget_lane = extractelement <1 x double> %a, i64 0
+  %vget_lane3 = extractelement <1 x double> %v, i64 0
+  %vmulxd_f64.i = tail call double @llvm.aarch64.neon.fmulx.f64(double %vget_lane, double %vget_lane3)
+  %vset_lane = insertelement <1 x double> poison, double %vmulxd_f64.i, i64 0
+  ret <1 x double> %vset_lane
+}
+
+define <1 x double> @test_vmulx_laneq_f64(<1 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmulx_laneq_f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmulx d0, d0, v1.d[1]
+; CHECK-NEXT: ret
+entry:
+  %vget_lane = extractelement <1 x double> %a, i64 0
+  %vgetq_lane = extractelement <2 x double> %v, i64 1
+  %vmulxd_f64.i = tail call double @llvm.aarch64.neon.fmulx.f64(double %vget_lane, double %vgetq_lane)
+  %vset_lane = insertelement <1 x double> poison, double %vmulxd_f64.i, i64 0
+  ret <1 x double> %vset_lane
+}
+
+define <1 x double> @test_vmulx_laneq_f64_0(<1 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmulx_laneq_f64_0:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmulx d0, d0, d1
+; CHECK-NEXT: ret
+entry:
+  %vget_lane = extractelement <1 x double> %a, i64 0
+  %vgetq_lane = extractelement <2 x double> %v, i64 0
+  %vmulxd_f64.i = tail call double @llvm.aarch64.neon.fmulx.f64(double %vget_lane, double %vgetq_lane)
+  %vset_lane = insertelement <1 x double> poison, double %vmulxd_f64.i, i64 0
+  ret <1 x double> %vset_lane
+}
Index: llvm/test/CodeGen/AArch64/arm64-neon-scalar-by-elem-mul.ll
===================================================================
--- llvm/test/CodeGen/AArch64/arm64-neon-scalar-by-elem-mul.ll
+++ llvm/test/CodeGen/AArch64/arm64-neon-scalar-by-elem-mul.ll
@@ -126,7 +126,7 @@
 define double @test_fmulx_laneq_f64_0(double %a, <2 x double> %v) {
 ; CHECK-LABEL: test_fmulx_laneq_f64_0:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: fmulx d0, d0, v1.d[0]
+; CHECK-NEXT: fmulx d0, d0, d1
 ; CHECK-NEXT: ret
   %tmp1 = extractelement <2 x double> %v, i32 0
   %tmp2 = call double @llvm.aarch64.neon.fmulx.f64(double %a, double %tmp1)
@@ -154,3 +154,79 @@
   ret double %tmp2;
 }
 
+define float @test_fmul_lane_ss2S_0(float %a, <2 x float> %v) {
+; CHECK-LABEL: test_fmul_lane_ss2S_0:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: fmul s0, s0, s1
+; CHECK-NEXT: ret
+  %tmp1 = extractelement <2 x float> %v, i32 0
+  %tmp2 = fmul float %a, %tmp1
+  ret float %tmp2
+}
+
+define float @test_fmul_lane_ss4S_0(float %a, <4 x float> %v) {
+; CHECK-LABEL: test_fmul_lane_ss4S_0:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmul s0, s0, s1
+; CHECK-NEXT: ret
+  %tmp1 = extractelement <4 x float> %v, i32 0
+  %tmp2 = fmul float %a, %tmp1
+  ret float %tmp2
+}
+
+define double @test_fmul_lane_dd2D_0(double %a, <2 x double> %v) {
+; CHECK-LABEL: test_fmul_lane_dd2D_0:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmul d0, d0, d1
+; CHECK-NEXT: ret
+  %tmp1 = extractelement <2 x double> %v, i32 0
+  %tmp2 = fmul double %a, %tmp1
+  ret double %tmp2
+}
+
+define float @test_fmulx_lane_f32_0(float %a, <2 x float> %v) {
+; CHECK-LABEL: test_fmulx_lane_f32_0:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: fmulx s0, s0, s1
+; CHECK-NEXT: ret
+  %tmp1 = extractelement <2 x float> %v, i32 0
+  %tmp2 = call float @llvm.aarch64.neon.fmulx.f32(float %a, float %tmp1)
+  ret float %tmp2;
+}
+
+define float @test_fmulx_laneq_f32_0(float %a, <4 x float> %v) {
+; CHECK-LABEL: test_fmulx_laneq_f32_0:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmulx s0, s0, s1
+; CHECK-NEXT: ret
+  %tmp1 = extractelement <4 x float> %v, i32 0
+  %tmp2 = call float @llvm.aarch64.neon.fmulx.f32(float %a, float %tmp1)
+  ret float %tmp2;
+}
+
+define float @test_fmulx_horizontal_f32(<2 x float> %v) {
+; CHECK-LABEL: test_fmulx_horizontal_f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmulx s0, s0, v0.s[1]
+; CHECK-NEXT: ret
+entry:
+  %0 = extractelement <2 x float> %v, i32 0
+  %1 = extractelement <2 x float> %v, i32 1
+  %2 = call float @llvm.aarch64.neon.fmulx.f32(float %0, float %1)
+  ret float %2
+}
+
+define double @test_fmulx_horizontal_f64(<2 x double> %v) {
+; CHECK-LABEL: test_fmulx_horizontal_f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmulx d0, d0, v0.d[1]
+; CHECK-NEXT: ret
+entry:
+  %0 = extractelement <2 x double> %v, i32 0
+  %1 = extractelement <2 x double> %v, i32 1
+  %2 = call double @llvm.aarch64.neon.fmulx.f64(double %0, double %1)
+  ret double %2
+}
Index: llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul.ll
===================================================================
--- llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul.ll
+++ llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul.ll
@@ -11,7 +11,7 @@
 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT: mov h3, v0.h[1]
 ; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: fmul h4, h2, v0.h[0]
+; CHECK-NEXT: fmul h4, h0, v1.h[1]
 ; CHECK-NEXT: fnmul h2, h3, h2
 ; CHECK-NEXT: fmla h4, h3, v1.h[0]
 ; CHECK-NEXT: fmla h2, h0, v1.h[0]
Index: llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
===================================================================
--- llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
+++ llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
@@ -232,7 +232,7 @@
 ; CHECK-LABEL: t_vmulh_lane_f16:
 ; CHECK: // %bb.0: // %entry
 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: fmul h0, h0, v1.h[0]
+; CHECK-NEXT: fmul h0, h0, h1
 ; CHECK-NEXT: ret
 entry:
   %0 = extractelement <4 x half> %c, i32 0
@@ -243,7 +243,7 @@
 define dso_local half @t_vmulh_laneq_f16(half %a, <8 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vmulh_laneq_f16:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmul h0, h0, v1.h[0]
+; CHECK-NEXT: fmul h0, h0, h1
 ; CHECK-NEXT: ret
 entry:
   %0 = extractelement <8 x half> %c, i32 0
@@ -418,3 +418,62 @@
   %1 = tail call half @llvm.fma.f16(half %b, half %extract, half %a)
   ret half %1
 }
+
+define dso_local half @t_vmulh_lane3_f16(half %a, <4 x half> %c, i32 %lane) {
+; CHECK-LABEL: t_vmulh_lane3_f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: fmul h0, h0, v1.h[3]
+; CHECK-NEXT: ret
+entry:
+  %0 = extractelement <4 x half> %c, i32 3
+  %1 = fmul half %0, %a
+  ret half %1
+}
+
+define dso_local half @t_vmulh_laneq7_f16(half %a, <8 x half> %c, i32 %lane) {
+; CHECK-LABEL: t_vmulh_laneq7_f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmul h0, h0, v1.h[7]
+; CHECK-NEXT: ret
+entry:
+  %0 = extractelement <8 x half> %c, i32 7
+  %1 = fmul half %0, %a
+  ret half %1
+}
+
+define dso_local half @t_vmulxh_lane0_f16(half %a, <4 x half> %b) {
+; CHECK-LABEL: t_vmulxh_lane0_f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: fmulx h0, h0, h1
+; CHECK-NEXT: ret
+entry:
+  %extract = extractelement <4 x half> %b, i32 0
+  %fmulx.i = tail call half @llvm.aarch64.neon.fmulx.f16(half %a, half %extract)
+  ret half %fmulx.i
+}
+
+define dso_local half @t_vmulxh_laneq0_f16(half %a, <8 x half> %b) {
+; CHECK-LABEL: t_vmulxh_laneq0_f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmulx h0, h0, h1
+; CHECK-NEXT: ret
+entry:
+  %extract = extractelement <8 x half> %b, i32 0
+  %fmulx.i = tail call half @llvm.aarch64.neon.fmulx.f16(half %a, half %extract)
+  ret half %fmulx.i
+}
+
+define half @test_fmulx_horizontal_f16(<2 x half> %v) {
+; CHECK-LABEL: test_fmulx_horizontal_f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmulx h0, h0, v0.h[1]
+; CHECK-NEXT: ret
+entry:
+  %0 = extractelement <2 x half> %v, i32 0
+  %1 = extractelement <2 x half> %v, i32 1
+  %2 = call half @llvm.aarch64.neon.fmulx.f16(half %0, half %1)
+  ret half %2
+}
Index: llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
===================================================================
--- llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
+++ llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
@@ -70,15 +70,15 @@
 ; CHECK-NEXT: fmul s4, s0, v0.s[1]
 ; CHECK-NEXT: fmul s4, s4, v0.s[2]
 ; CHECK-NEXT: fmul s0, s4, v0.s[3]
-; CHECK-NEXT: fmul s0, s0, v1.s[0]
+; CHECK-NEXT: fmul s0, s0, s1
 ; CHECK-NEXT: fmul s0, s0, v1.s[1]
 ; CHECK-NEXT: fmul s0, s0, v1.s[2]
 ; CHECK-NEXT: fmul s0, s0, v1.s[3]
-; CHECK-NEXT: fmul s0, s0, v2.s[0]
+; CHECK-NEXT: fmul s0, s0, s2
 ; CHECK-NEXT: fmul s0, s0, v2.s[1]
 ; CHECK-NEXT: fmul s0, s0, v2.s[2]
 ; CHECK-NEXT: fmul s0, s0, v2.s[3]
-; CHECK-NEXT: fmul s0, s0, v3.s[0]
+; CHECK-NEXT: fmul s0, s0, s3
 ; CHECK-NEXT: fmul s0, s0, v3.s[1]
 ; CHECK-NEXT: fmul s0, s0, v3.s[2]
 ; CHECK-NEXT: fmul s0, s0, v3.s[3]