Index: llvm/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -267,6 +267,8 @@
     setOperationAction(ISD::BSWAP, VT, Legal);
     setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
     setOperationAction(ISD::AND, VT, Custom);
+    setOperationAction(ISD::SADDSAT, VT, Legal);
+    setOperationAction(ISD::UADDSAT, VT, Legal);
 
     // No native support for these.
     setOperationAction(ISD::UDIV, VT, Expand);
Index: llvm/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -1585,8 +1585,8 @@
 }
 
 class MVE_VQADDSUB<string iname, string suffix, bit U, bit subtract,
-                   bits<2> size, list<dag> pattern=[]>
-  : MVE_int<iname, suffix, size, pattern> {
+                   bits<2> size, ValueType vt>
+  : MVE_int<iname, suffix, size, []> {
 
   let Inst{28} = U;
   let Inst{25-23} = 0b110;
@@ -1596,26 +1596,40 @@
   let Inst{8} = 0b0;
   let Inst{4} = 0b1;
   let Inst{0} = 0b0;
+
+  ValueType VT = vt;
+}
+
+class MVE_VQADD<string suffix, bit U, bits<2> size, ValueType VT>
+  : MVE_VQADDSUB<"vqadd", suffix, U, 0b0, size, VT>;
+class MVE_VQSUB<string suffix, bit U, bits<2> size, ValueType VT>
+  : MVE_VQADDSUB<"vqsub", suffix, U, 0b1, size, VT>;
+
+def MVE_VQADDs8 : MVE_VQADD<"s8", 0b0, 0b00, v16i8>;
+def MVE_VQADDs16 : MVE_VQADD<"s16", 0b0, 0b01, v8i16>;
+def MVE_VQADDs32 : MVE_VQADD<"s32", 0b0, 0b10, v4i32>;
+def MVE_VQADDu8 : MVE_VQADD<"u8", 0b1, 0b00, v16i8>;
+def MVE_VQADDu16 : MVE_VQADD<"u16", 0b1, 0b01, v8i16>;
+def MVE_VQADDu32 : MVE_VQADD<"u32", 0b1, 0b10, v4i32>;
+
+def MVE_VQSUBs8 : MVE_VQSUB<"s8", 0b0, 0b00, v16i8>;
+def MVE_VQSUBs16 : MVE_VQSUB<"s16", 0b0, 0b01, v8i16>;
+def MVE_VQSUBs32 : MVE_VQSUB<"s32", 0b0, 0b10, v4i32>;
+def MVE_VQSUBu8 : MVE_VQSUB<"u8", 0b1, 0b00, v16i8>;
+def MVE_VQSUBu16 : MVE_VQSUB<"u16", 0b1, 0b01, v8i16>;
+def MVE_VQSUBu32 : MVE_VQSUB<"u32", 0b1, 0b10, v4i32>;
+
+let Predicates = [HasMVEInt] in {
+  foreach instr = [MVE_VQADDu8, MVE_VQADDu16, MVE_VQADDu32] in
+    foreach VT = [instr.VT] in
+      def : Pat<(VT (uaddsat (VT MQPR:$Qm), (VT MQPR:$Qn))),
+                (VT (instr (VT MQPR:$Qm), (VT MQPR:$Qn)))>;
+  foreach instr = [MVE_VQADDs8, MVE_VQADDs16, MVE_VQADDs32] in
+    foreach VT = [instr.VT] in
+      def : Pat<(VT (saddsat (VT MQPR:$Qm), (VT MQPR:$Qn))),
+                (VT (instr (VT MQPR:$Qm), (VT MQPR:$Qn)))>;
 }
 
-class MVE_VQADD<string suffix, bit U, bits<2> size, list<dag> pattern=[]>
-  : MVE_VQADDSUB<"vqadd", suffix, U, 0b0, size, pattern>;
-class MVE_VQSUB<string suffix, bit U, bits<2> size, list<dag> pattern=[]>
-  : MVE_VQADDSUB<"vqsub", suffix, U, 0b1, size, pattern>;
-
-def MVE_VQADDs8 : MVE_VQADD<"s8", 0b0, 0b00>;
-def MVE_VQADDs16 : MVE_VQADD<"s16", 0b0, 0b01>;
-def MVE_VQADDs32 : MVE_VQADD<"s32", 0b0, 0b10>;
-def MVE_VQADDu8 : MVE_VQADD<"u8", 0b1, 0b00>;
-def MVE_VQADDu16 : MVE_VQADD<"u16", 0b1, 0b01>;
-def MVE_VQADDu32 : MVE_VQADD<"u32", 0b1, 0b10>;
-
-def MVE_VQSUBs8 : MVE_VQSUB<"s8", 0b0, 0b00>;
-def MVE_VQSUBs16 : MVE_VQSUB<"s16", 0b0, 0b01>;
-def MVE_VQSUBs32 : MVE_VQSUB<"s32", 0b0, 0b10>;
-def MVE_VQSUBu8 : MVE_VQSUB<"u8", 0b1, 0b00>;
-def MVE_VQSUBu16 : MVE_VQSUB<"u16", 0b1, 0b01>;
-def MVE_VQSUBu32 : MVE_VQSUB<"u32", 0b1, 0b10>;
 
 class MVE_VABD_int<string suffix, bits<2> size, list<dag> pattern=[]>
   : MVE_int<"vabd", suffix, size, pattern> {
Index: llvm/test/CodeGen/Thumb2/mve-saturating-arith.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-saturating-arith.ll
+++ llvm/test/CodeGen/Thumb2/mve-saturating-arith.ll
@@ -4,21 +4,7 @@
 define arm_aapcs_vfpcc <16 x i8> @sadd_int8_t(<16 x i8> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: sadd_int8_t:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vadd.i8 q2, q0, q1
-; CHECK-NEXT:    vmov.i8 q3, #0x80
-; CHECK-NEXT:    vcmp.s8 lt, q2, zr
-; CHECK-NEXT:    vmov.i8 q4, #0x7f
-; CHECK-NEXT:    vpsel q3, q4, q3
-; CHECK-NEXT:    vcmp.s8 gt, q0, q2
-; CHECK-NEXT:    vmrs r0, p0
-; CHECK-NEXT:    vcmp.s8 lt, q1, zr
-; CHECK-NEXT:    vmrs r1, p0
-; CHECK-NEXT:    eors r0, r1
-; CHECK-NEXT:    vmsr p0, r0
-; CHECK-NEXT:    vpsel q0, q3, q2
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vqadd.s8 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
@@ -28,21 +14,7 @@
 define arm_aapcs_vfpcc <8 x i16> @sadd_int16_t(<8 x i16> %src1, <8 x i16> %src2) {
 ; CHECK-LABEL: sadd_int16_t:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vadd.i16 q2, q0, q1
-; CHECK-NEXT:    vmov.i16 q3, #0x8000
-; CHECK-NEXT:    vcmp.s16 lt, q2, zr
-; CHECK-NEXT:    vmvn.i16 q4, #0x8000
-; CHECK-NEXT:    vpsel q3, q4, q3
-; CHECK-NEXT:    vcmp.s16 gt, q0, q2
-; CHECK-NEXT:    vmrs r0, p0
-; CHECK-NEXT:    vcmp.s16 lt, q1, zr
-; CHECK-NEXT:    vmrs r1, p0
-; CHECK-NEXT:    eors r0, r1
-; CHECK-NEXT:    vmsr p0, r0
-; CHECK-NEXT:    vpsel q0, q3, q2
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vqadd.s16 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
@@ -52,21 +24,7 @@
 define arm_aapcs_vfpcc <4 x i32> @sadd_int32_t(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-LABEL: sadd_int32_t:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vadd.i32 q2, q0, q1
-; CHECK-NEXT:    vmov.i32 q3, #0x80000000
-; CHECK-NEXT:    vcmp.s32 lt, q2, zr
-; CHECK-NEXT:    vmvn.i32 q4, #0x80000000
-; CHECK-NEXT:    vpsel q3, q4, q3
-; CHECK-NEXT:    vcmp.s32 gt, q0, q2
-; CHECK-NEXT:    vmrs r0, p0
-; CHECK-NEXT:    vcmp.s32 lt, q1, zr
-; CHECK-NEXT:    vmrs r1, p0
-; CHECK-NEXT:    eors r0, r1
-; CHECK-NEXT:    vmsr p0, r0
-; CHECK-NEXT:    vpsel q0, q3, q2
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vqadd.s32 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
@@ -156,9 +114,7 @@
 define arm_aapcs_vfpcc <16 x i8> @uadd_int8_t(<16 x i8> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: uadd_int8_t:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmvn q2, q1
-; CHECK-NEXT:    vmin.u8 q0, q0, q2
-; CHECK-NEXT:    vadd.i8 q0, q0, q1
+; CHECK-NEXT:    vqadd.u8 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
@@ -168,9 +124,7 @@
 define arm_aapcs_vfpcc <8 x i16> @uadd_int16_t(<8 x i16> %src1, <8 x i16> %src2) {
 ; CHECK-LABEL: uadd_int16_t:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmvn q2, q1
-; CHECK-NEXT:    vmin.u16 q0, q0, q2
-; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vqadd.u16 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
@@ -180,9 +134,7 @@
 define arm_aapcs_vfpcc <4 x i32> @uadd_int32_t(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-LABEL: uadd_int32_t:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmvn q2, q1
-; CHECK-NEXT:    vmin.u32 q0, q0, q2
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vqadd.u32 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)