diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -843,6 +843,11 @@
       setCondCodeAction(CC, VT, Expand);
     }
 
+    setOperationAction(ISD::SMAX, VT, Legal);
+    setOperationAction(ISD::SMIN, VT, Legal);
+    setOperationAction(ISD::UMAX, VT, Legal);
+    setOperationAction(ISD::UMIN, VT, Legal);
+
     // Promote load and store operations.
     setOperationAction(ISD::LOAD, VT, Promote);
     AddPromotedToType(ISD::LOAD, VT, PromotedBitwiseVT);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -1390,3 +1390,22 @@
 def : Pat<(XVEI16VT (setule (XVEI16VT GPR:$rs1), GPR:$rs2)),
           (UCMPLE16 GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZpn]
+
+// max, min
+let Predicates = [HasStdExtZpn] in {
+def : Pat<(XVEI8VT (smax GPR:$rs1, GPR:$rs2)), (SMAX8 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI8VT (smin GPR:$rs1, GPR:$rs2)), (SMIN8 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI8VT (umax GPR:$rs1, GPR:$rs2)), (UMAX8 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI8VT (umin GPR:$rs1, GPR:$rs2)), (UMIN8 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI16VT (smax GPR:$rs1, GPR:$rs2)), (SMAX16 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI16VT (smin GPR:$rs1, GPR:$rs2)), (SMIN16 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI16VT (umax GPR:$rs1, GPR:$rs2)), (UMAX16 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI16VT (umin GPR:$rs1, GPR:$rs2)), (UMIN16 GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZpn]
+
+let Predicates = [HasStdExtZpn, IsRV64] in {
+def : Pat<(XVEI32VT (smax GPR:$rs1, GPR:$rs2)), (SMAX32 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI32VT (smin GPR:$rs1, GPR:$rs2)), (SMIN32 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI32VT (umax GPR:$rs1, GPR:$rs2)), (UMAX32 GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XVEI32VT (umin GPR:$rs1, GPR:$rs2)), (UMIN32 GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZpn, IsRV64]
diff --git a/llvm/test/CodeGen/RISCV/rvp/vector-maxmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvp/vector-maxmin-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvp/vector-maxmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvp/vector-maxmin-rv64.ll
@@ -7,25 +7,7 @@
 define i64 @smaxv2i32_1(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: smaxv2i32_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    lw a2, 12(sp)
-; RV64-NEXT:    lw a3, 4(sp)
-; RV64-NEXT:    lw a4, 8(sp)
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    slt a2, a3, a2
-; RV64-NEXT:    neg a2, a2
-; RV64-NEXT:    slt a3, a5, a4
-; RV64-NEXT:    neg a3, a3
-; RV64-NEXT:    pktb32 a3, zero, a3
-; RV64-NEXT:    pkbb32 a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    smax32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -38,31 +20,7 @@
 define i64 @smaxv2i32_2(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: smaxv2i32_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    lw a4, 4(sp)
-; RV64-NEXT:    lw a5, 12(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bge a5, a4, .LBB1_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB1_2:
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bge a2, a5, .LBB1_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB1_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    smax32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -75,31 +33,7 @@
 define i64 @smaxv2i32_3(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: smaxv2i32_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    lw a4, 4(sp)
-; RV64-NEXT:    lw a5, 12(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    blt a5, a4, .LBB2_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB2_2:
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    blt a2, a5, .LBB2_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB2_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    smax32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -112,31 +46,7 @@
 define i64 @smaxv2i32_4(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: smaxv2i32_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    lw a4, 12(sp)
-; RV64-NEXT:    lw a5, 4(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bge a5, a4, .LBB3_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB3_2:
-; RV64-NEXT:    lw a5, 8(sp)
-; RV64-NEXT:    lw a2, 0(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bge a2, a5, .LBB3_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB3_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    smax32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -151,25 +61,7 @@
 define i64 @sminv2i32_1(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: sminv2i32_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    lw a2, 12(sp)
-; RV64-NEXT:    lw a3, 4(sp)
-; RV64-NEXT:    lw a4, 8(sp)
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    slt a2, a3, a2
-; RV64-NEXT:    neg a2, a2
-; RV64-NEXT:    slt a3, a5, a4
-; RV64-NEXT:    neg a3, a3
-; RV64-NEXT:    pktb32 a3, zero, a3
-; RV64-NEXT:    pkbb32 a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    smin32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -182,31 +74,7 @@
 define i64 @sminv2i32_2(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: sminv2i32_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    lw a4, 4(sp)
-; RV64-NEXT:    lw a5, 12(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bge a5, a4, .LBB5_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB5_2:
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bge a2, a5, .LBB5_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB5_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    smin32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -219,31 +87,7 @@
 define i64 @sminv2i32_3(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: sminv2i32_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    lw a4, 4(sp)
-; RV64-NEXT:    lw a5, 12(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    blt a5, a4, .LBB6_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB6_2:
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    blt a2, a5, .LBB6_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB6_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    smin32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -256,31 +100,7 @@
 define i64 @sminv2i32_4(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: sminv2i32_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    lw a4, 12(sp)
-; RV64-NEXT:    lw a5, 4(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bge a5, a4, .LBB7_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB7_2:
-; RV64-NEXT:    lw a5, 8(sp)
-; RV64-NEXT:    lw a2, 0(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bge a2, a5, .LBB7_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB7_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    smin32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -295,31 +115,7 @@
 define i64 @umaxv2i32_1(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: umaxv2i32_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    lw a4, 12(sp)
-; RV64-NEXT:    lw a5, 4(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bltu a5, a4, .LBB8_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB8_2:
-; RV64-NEXT:    lw a5, 8(sp)
-; RV64-NEXT:    lw a2, 0(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bltu a2, a5, .LBB8_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB8_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    umax32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -332,31 +128,7 @@
 define i64 @umaxv2i32_2(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: umaxv2i32_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    lw a4, 4(sp)
-; RV64-NEXT:    lw a5, 12(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bgeu a5, a4, .LBB9_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB9_2:
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bgeu a2, a5, .LBB9_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB9_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    umax32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -369,31 +141,7 @@
 define i64 @umaxv2i32_3(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: umaxv2i32_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    lw a4, 4(sp)
-; RV64-NEXT:    lw a5, 12(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bltu a5, a4, .LBB10_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB10_2:
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bltu a2, a5, .LBB10_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB10_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    umax32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -406,31 +154,7 @@
 define i64 @umaxv2i32_4(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: umaxv2i32_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    lw a4, 12(sp)
-; RV64-NEXT:    lw a5, 4(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bgeu a5, a4, .LBB11_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB11_2:
-; RV64-NEXT:    lw a5, 8(sp)
-; RV64-NEXT:    lw a2, 0(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bgeu a2, a5, .LBB11_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB11_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    umax32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -445,31 +169,7 @@
 define i64 @uminv2i32_1(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: uminv2i32_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    lw a4, 12(sp)
-; RV64-NEXT:    lw a5, 4(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bltu a5, a4, .LBB12_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB12_2:
-; RV64-NEXT:    lw a5, 8(sp)
-; RV64-NEXT:    lw a2, 0(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bltu a2, a5, .LBB12_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB12_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    umin32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -482,31 +182,7 @@
 define i64 @uminv2i32_2(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: uminv2i32_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    lw a4, 4(sp)
-; RV64-NEXT:    lw a5, 12(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bgeu a5, a4, .LBB13_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB13_2:
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bgeu a2, a5, .LBB13_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB13_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    umin32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -519,31 +195,7 @@
 define i64 @uminv2i32_3(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: uminv2i32_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    lw a4, 4(sp)
-; RV64-NEXT:    lw a5, 12(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bltu a5, a4, .LBB14_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB14_2:
-; RV64-NEXT:    lw a5, 0(sp)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bltu a2, a5, .LBB14_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB14_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    umin32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
@@ -556,31 +208,7 @@
 define i64 @uminv2i32_4(i64 %a, i64 %b) nounwind {
 ; RV64-LABEL: uminv2i32_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    lw a4, 12(sp)
-; RV64-NEXT:    lw a5, 4(sp)
-; RV64-NEXT:    addi a6, zero, -1
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    bgeu a5, a4, .LBB15_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a3, zero
-; RV64-NEXT:  .LBB15_2:
-; RV64-NEXT:    lw a5, 8(sp)
-; RV64-NEXT:    lw a2, 0(sp)
-; RV64-NEXT:    addi a4, zero, -1
-; RV64-NEXT:    bgeu a2, a5, .LBB15_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a4, zero
-; RV64-NEXT:  .LBB15_4:
-; RV64-NEXT:    pktb32 a2, zero, a4
-; RV64-NEXT:    pkbb32 a2, a3, a2
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    xor a2, a2, a6
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    umin32 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <2 x i32>
   %tmp2 = bitcast i64 %b to <2 x i32>
diff --git a/llvm/test/CodeGen/RISCV/rvp/vector-maxmin.ll b/llvm/test/CodeGen/RISCV/rvp/vector-maxmin.ll
--- a/llvm/test/CodeGen/RISCV/rvp/vector-maxmin.ll
+++ b/llvm/test/CodeGen/RISCV/rvp/vector-maxmin.ll
@@ -9,22 +9,12 @@
 define i32 @smaxv4i8_1(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: smaxv4i8_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt8 a2, a0, a1
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    smax8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv4i8_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt8 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -37,22 +27,12 @@
 define i32 @smaxv4i8_2(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: smaxv4i8_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple8 a2, a0, a1
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    smax8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv4i8_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple8 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -65,22 +45,12 @@
 define i32 @smaxv4i8_3(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: smaxv4i8_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt8 a2, a1, a0
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    smax8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv4i8_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt8 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -93,22 +63,12 @@
 define i32 @smaxv4i8_4(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: smaxv4i8_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple8 a2, a1, a0
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    smax8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv4i8_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple8 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -121,27 +81,13 @@
 define i64 @smaxv8i8_1(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: smaxv8i8_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt8 a4, a0, a2
-; RV32-NEXT:    scmplt8 a5, a1, a3
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    smax8 a1, a1, a3
+; RV32-NEXT:    smax8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv8i8_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt8 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -154,27 +100,13 @@
 define i64 @smaxv8i8_2(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: smaxv8i8_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple8 a4, a0, a2
-; RV32-NEXT:    scmple8 a5, a1, a3
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    smax8 a1, a1, a3
+; RV32-NEXT:    smax8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv8i8_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple8 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -187,27 +119,13 @@
 define i64 @smaxv8i8_3(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: smaxv8i8_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt8 a4, a2, a0
-; RV32-NEXT:    scmplt8 a5, a3, a1
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    smax8 a1, a1, a3
+; RV32-NEXT:    smax8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv8i8_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt8 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -220,27 +138,13 @@
 define i64 @smaxv8i8_4(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: smaxv8i8_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple8 a4, a2, a0
-; RV32-NEXT:    scmple8 a5, a3, a1
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    smax8 a1, a1, a3
+; RV32-NEXT:    smax8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv8i8_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple8 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -255,22 +159,12 @@
 define i32 @sminv4i8_1(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: sminv4i8_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt8 a2, a0, a1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    smin8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv4i8_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt8 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -283,22 +177,12 @@
 define i32 @sminv4i8_2(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: sminv4i8_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple8 a2, a0, a1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    smin8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv4i8_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple8 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -311,22 +195,12 @@
 define i32 @sminv4i8_3(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: sminv4i8_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt8 a2, a1, a0
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    smin8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv4i8_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt8 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -339,22 +213,12 @@
 define i32 @sminv4i8_4(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: sminv4i8_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple8 a2, a1, a0
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    smin8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv4i8_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple8 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -367,27 +231,13 @@
 define i64 @sminv8i8_1(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: sminv8i8_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt8 a4, a0, a2
-; RV32-NEXT:    scmplt8 a5, a1, a3
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    smin8 a1, a1, a3
+; RV32-NEXT:    smin8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv8i8_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt8 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -400,27 +250,13 @@
 define i64 @sminv8i8_2(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: sminv8i8_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple8 a4, a0, a2
-; RV32-NEXT:    scmple8 a5, a1, a3
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    smin8 a1, a1, a3
+; RV32-NEXT:    smin8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv8i8_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple8 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -433,27 +269,13 @@
 define i64 @sminv8i8_3(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: sminv8i8_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt8 a4, a2, a0
-; RV32-NEXT:    scmplt8 a5, a3, a1
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    smin8 a1, a1, a3
+; RV32-NEXT:    smin8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv8i8_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt8 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -466,27 +288,13 @@
 define i64 @sminv8i8_4(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: sminv8i8_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple8 a4, a2, a0
-; RV32-NEXT:    scmple8 a5, a3, a1
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    smin8 a1, a1, a3
+; RV32-NEXT:    smin8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv8i8_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple8 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -501,22 +309,12 @@
 define i32 @umaxv4i8_1(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: umaxv4i8_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt8 a2, a0, a1
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    umax8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv4i8_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt8 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -529,22 +327,12 @@
 define i32 @umaxv4i8_2(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: umaxv4i8_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple8 a2, a0, a1
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    umax8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv4i8_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple8 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -557,22 +345,12 @@
 define i32 @umaxv4i8_3(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: umaxv4i8_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt8 a2, a1, a0
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    umax8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv4i8_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt8 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -585,22 +363,12 @@
 define i32 @umaxv4i8_4(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: umaxv4i8_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple8 a2, a1, a0
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    umax8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv4i8_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple8 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -613,27 +381,13 @@
 define i64 @umaxv8i8_1(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: umaxv8i8_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt8 a4, a0, a2
-; RV32-NEXT:    ucmplt8 a5, a1, a3
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    umax8 a1, a1, a3
+; RV32-NEXT:    umax8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv8i8_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt8 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -646,27 +400,13 @@
 define i64 @umaxv8i8_2(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: umaxv8i8_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple8 a4, a0, a2
-; RV32-NEXT:    ucmple8 a5, a1, a3
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    umax8 a1, a1, a3
+; RV32-NEXT:    umax8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv8i8_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple8 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -679,27 +419,13 @@
 define i64 @umaxv8i8_3(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: umaxv8i8_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt8 a4, a2, a0
-; RV32-NEXT:    ucmplt8 a5, a3, a1
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    umax8 a1, a1, a3
+; RV32-NEXT:    umax8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv8i8_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt8 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -712,27 +438,13 @@
 define i64 @umaxv8i8_4(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: umaxv8i8_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple8 a4, a2, a0
-; RV32-NEXT:    ucmple8 a5, a3, a1
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    umax8 a1, a1, a3
+; RV32-NEXT:    umax8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv8i8_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple8 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umax8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -747,22 +459,12 @@
 define i32 @uminv4i8_1(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: uminv4i8_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt8 a2, a0, a1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    umin8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uminv4i8_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt8 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -775,22 +477,12 @@
 define i32 @uminv4i8_2(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: uminv4i8_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple8 a2, a0, a1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    umin8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uminv4i8_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple8 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -803,22 +495,12 @@
 define i32 @uminv4i8_3(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: uminv4i8_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt8 a2, a1, a0
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    umin8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uminv4i8_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt8 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -831,22 +513,12 @@
 define i32 @uminv4i8_4(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: uminv4i8_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple8 a2, a1, a0
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    umin8 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uminv4i8_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple8 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <4 x i8>
   %tmp2 = bitcast i32 %b to <4 x i8>
@@ -859,27 +531,13 @@
 define i64 @uminv8i8_1(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: uminv8i8_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt8 a4, a0, a2
-; RV32-NEXT:    ucmplt8 a5, a1, a3
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    umin8 a1, a1, a3
+; RV32-NEXT:    umin8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uminv8i8_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt8 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -892,27 +550,13 @@
 define i64 @uminv8i8_2(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: uminv8i8_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple8 a4, a0, a2
-; RV32-NEXT:    ucmple8 a5, a1, a3
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    umin8 a1, a1, a3
+; RV32-NEXT:    umin8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uminv8i8_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple8 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -925,27 +569,13 @@
 define i64 @uminv8i8_3(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: uminv8i8_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt8 a4, a2, a0
-; RV32-NEXT:    ucmplt8 a5, a3, a1
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    umin8 a1, a1, a3
+; RV32-NEXT:    umin8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uminv8i8_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt8 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -958,27 +588,13 @@
 define i64 @uminv8i8_4(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: uminv8i8_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple8 a4, a2, a0
-; RV32-NEXT:    ucmple8 a5, a3, a1
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    umin8 a1, a1, a3
+; RV32-NEXT:    umin8 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uminv8i8_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple8 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umin8 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <8 x i8>
   %tmp2 = bitcast i64 %b to <8 x i8>
@@ -993,22 +609,12 @@
 define i32 @smaxv2i16_1(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: smaxv2i16_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt16 a2, a0, a1
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    smax16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv2i16_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt16 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1021,22 +627,12 @@
 define i32 @smaxv2i16_2(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: smaxv2i16_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple16 a2, a0, a1
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    smax16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv2i16_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple16 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1049,22 +645,12 @@
 define i32 @smaxv2i16_3(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: smaxv2i16_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt16 a2, a1, a0
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    smax16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv2i16_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt16 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1077,22 +663,12 @@
 define i32 @smaxv2i16_4(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: smaxv2i16_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple16 a2, a1, a0
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    smax16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv2i16_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple16 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1105,27 +681,13 @@
 define i64 @smaxv4i16_1(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: smaxv4i16_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt16 a4, a0, a2
-; RV32-NEXT:    scmplt16 a5, a1, a3
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    smax16 a1, a1, a3
+; RV32-NEXT:    smax16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv4i16_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt16 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1138,27 +700,13 @@
 define i64 @smaxv4i16_2(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: smaxv4i16_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple16 a4, a0, a2
-; RV32-NEXT:    scmple16 a5, a1, a3
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    smax16 a1, a1, a3
+; RV32-NEXT:    smax16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv4i16_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple16 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1171,27 +719,13 @@
 define i64 @smaxv4i16_3(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: smaxv4i16_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt16 a4, a2, a0
-; RV32-NEXT:    scmplt16 a5, a3, a1
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    smax16 a1, a1, a3
+; RV32-NEXT:    smax16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv4i16_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt16 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1204,27 +738,13 @@
 define i64 @smaxv4i16_4(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: smaxv4i16_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple16 a4, a2, a0
-; RV32-NEXT:    scmple16 a5, a3, a1
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    smax16 a1, a1, a3
+; RV32-NEXT:    smax16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smaxv4i16_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple16 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1239,22 +759,12 @@
 define i32 @sminv2i16_1(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: sminv2i16_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt16 a2, a0, a1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    smin16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv2i16_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt16 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smin16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1267,22 +777,12 @@
 define i32 @sminv2i16_2(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: sminv2i16_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple16 a2, a0, a1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    smin16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv2i16_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple16 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smin16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1295,22 +795,12 @@
 define i32 @sminv2i16_3(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: sminv2i16_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt16 a2, a1, a0
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    smin16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv2i16_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt16 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smin16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1323,22 +813,12 @@
 define i32 @sminv2i16_4(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: sminv2i16_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple16 a2, a1, a0
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    smin16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv2i16_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple16 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smin16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1351,27 +831,13 @@
 define i64 @sminv4i16_1(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: sminv4i16_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt16 a4, a0, a2
-; RV32-NEXT:    scmplt16 a5, a1, a3
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    smin16 a1, a1, a3
+; RV32-NEXT:    smin16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv4i16_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt16 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smin16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1384,27 +850,13 @@
 define i64 @sminv4i16_2(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: sminv4i16_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple16 a4, a0, a2
-; RV32-NEXT:    scmple16 a5, a1, a3
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    smin16 a1, a1, a3
+; RV32-NEXT:    smin16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv4i16_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple16 a2, a0, a1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    smin16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1417,27 +869,13 @@
 define i64 @sminv4i16_3(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: sminv4i16_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmplt16 a4, a2, a0
-; RV32-NEXT:    scmplt16 a5, a3, a1
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    smin16 a1, a1, a3
+; RV32-NEXT:    smin16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv4i16_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmplt16 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smin16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1450,27 +888,13 @@
 define i64 @sminv4i16_4(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: sminv4i16_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    scmple16 a4, a2, a0
-; RV32-NEXT:    scmple16 a5, a3, a1
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    smin16 a1, a1, a3
+; RV32-NEXT:    smin16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sminv4i16_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    scmple16 a2, a1, a0
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    smin16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1485,22 +909,12 @@
 define i32 @umaxv2i16_1(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: umaxv2i16_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt16 a2, a0, a1
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    umax16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv2i16_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt16 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1513,22 +927,12 @@
 define i32 @umaxv2i16_2(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: umaxv2i16_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple16 a2, a0, a1
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    umax16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv2i16_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple16 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1541,22 +945,12 @@
 define i32 @umaxv2i16_3(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: umaxv2i16_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt16 a2, a1, a0
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    umax16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv2i16_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt16 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1569,22 +963,12 @@
 define i32 @umaxv2i16_4(i32 %a, i32 %b) nounwind {
 ; RV32-LABEL: umaxv2i16_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple16 a2, a1, a0
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, zero, -1
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    umax16 a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv2i16_4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple16 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i32 %a to <2 x i16>
   %tmp2 = bitcast i32 %b to <2 x i16>
@@ -1597,27 +981,13 @@
 define i64 @umaxv4i16_1(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: umaxv4i16_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt16 a4, a0, a2
-; RV32-NEXT:    ucmplt16 a5, a1, a3
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    umax16 a1, a1, a3
+; RV32-NEXT:    umax16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv4i16_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt16 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1630,27 +1000,13 @@
 define i64 @umaxv4i16_2(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: umaxv4i16_2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple16 a4, a0, a2
-; RV32-NEXT:    ucmple16 a5, a1, a3
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a0, a0, a3
-; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    umax16 a1, a1, a3
+; RV32-NEXT:    umax16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv4i16_2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmple16 a2, a0, a1
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    umax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1663,27 +1019,13 @@
 define i64 @umaxv4i16_3(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: umaxv4i16_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmplt16 a4, a2, a0
-; RV32-NEXT:    ucmplt16 a5, a3, a1
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT:    addi a6, zero, -1
-; RV32-NEXT:    xor a5, a5, a6
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    and a0, a0, a4
-; RV32-NEXT:    xor a3, a4, a6
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    umax16 a1, a1, a3
+; RV32-NEXT:    umax16 a0, a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umaxv4i16_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    ucmplt16 a2, a1, a0
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, zero, -1
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    umax16 a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp1 = bitcast i64 %a to <4 x i16>
   %tmp2 = bitcast i64 %b to <4 x i16>
@@ -1696,27 +1038,13 @@
 define i64 @umaxv4i16_4(i64 %a, i64 %b) nounwind {
 ; RV32-LABEL: umaxv4i16_4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ucmple16 a4, a2, a0
-; RV32-NEXT:    ucmple16 a5, a3, a1
-; RV32-NEXT:    and a1, a1, a5
-; RV32-NEXT: addi a6, zero, -1 -; RV32-NEXT: xor a5, a5, a6 -; RV32-NEXT: and a3, a3, a5 -; RV32-NEXT: or a1, a1, a3 -; RV32-NEXT: and a0, a0, a4 -; RV32-NEXT: xor a3, a4, a6 -; RV32-NEXT: and a2, a2, a3 -; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: umax16 a1, a1, a3 +; RV32-NEXT: umax16 a0, a0, a2 ; RV32-NEXT: ret ; ; RV64-LABEL: umaxv4i16_4: ; RV64: # %bb.0: -; RV64-NEXT: ucmple16 a2, a1, a0 -; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: xor a2, a2, a3 -; RV64-NEXT: and a1, a1, a2 -; RV64-NEXT: or a0, a0, a1 +; RV64-NEXT: umax16 a0, a0, a1 ; RV64-NEXT: ret %tmp1 = bitcast i64 %a to <4 x i16> %tmp2 = bitcast i64 %b to <4 x i16> @@ -1731,22 +1059,12 @@ define i32 @uminv2i16_1(i32 %a, i32 %b) nounwind { ; RV32-LABEL: uminv2i16_1: ; RV32: # %bb.0: -; RV32-NEXT: ucmplt16 a2, a0, a1 -; RV32-NEXT: and a0, a0, a2 -; RV32-NEXT: addi a3, zero, -1 -; RV32-NEXT: xor a2, a2, a3 -; RV32-NEXT: and a1, a1, a2 -; RV32-NEXT: or a0, a0, a1 +; RV32-NEXT: umin16 a0, a0, a1 ; RV32-NEXT: ret ; ; RV64-LABEL: uminv2i16_1: ; RV64: # %bb.0: -; RV64-NEXT: ucmplt16 a2, a0, a1 -; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: xor a2, a2, a3 -; RV64-NEXT: and a1, a1, a2 -; RV64-NEXT: or a0, a0, a1 +; RV64-NEXT: umin16 a0, a0, a1 ; RV64-NEXT: ret %tmp1 = bitcast i32 %a to <2 x i16> %tmp2 = bitcast i32 %b to <2 x i16> @@ -1759,22 +1077,12 @@ define i32 @uminv2i16_2(i32 %a, i32 %b) nounwind { ; RV32-LABEL: uminv2i16_2: ; RV32: # %bb.0: -; RV32-NEXT: ucmple16 a2, a0, a1 -; RV32-NEXT: and a0, a0, a2 -; RV32-NEXT: addi a3, zero, -1 -; RV32-NEXT: xor a2, a2, a3 -; RV32-NEXT: and a1, a1, a2 -; RV32-NEXT: or a0, a0, a1 +; RV32-NEXT: umin16 a0, a0, a1 ; RV32-NEXT: ret ; ; RV64-LABEL: uminv2i16_2: ; RV64: # %bb.0: -; RV64-NEXT: ucmple16 a2, a0, a1 -; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: xor a2, a2, a3 -; RV64-NEXT: and a1, a1, a2 -; RV64-NEXT: or a0, a0, a1 +; RV64-NEXT: umin16 a0, a0, a1 ; RV64-NEXT: ret %tmp1 = bitcast i32 %a to <2 x i16> %tmp2 = bitcast i32 %b to <2 x i16> @@ -1787,22 +1095,12 @@ define i32 @uminv2i16_3(i32 %a, i32 %b) nounwind { ; RV32-LABEL: uminv2i16_3: ; RV32: # %bb.0: -; RV32-NEXT: ucmplt16 a2, a1, a0 -; RV32-NEXT: and a1, a1, a2 -; RV32-NEXT: addi a3, zero, -1 -; RV32-NEXT: xor a2, a2, a3 -; RV32-NEXT: and a0, a0, a2 -; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: umin16 a0, a0, a1 ; RV32-NEXT: ret ; ; RV64-LABEL: uminv2i16_3: ; RV64: # %bb.0: -; RV64-NEXT: ucmplt16 a2, a1, a0 -; RV64-NEXT: and a1, a1, a2 -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: xor a2, a2, a3 -; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: umin16 a0, a0, a1 ; RV64-NEXT: ret %tmp1 = bitcast i32 %a to <2 x i16> %tmp2 = bitcast i32 %b to <2 x i16> @@ -1815,22 +1113,12 @@ define i32 @uminv2i16_4(i32 %a, i32 %b) nounwind { ; RV32-LABEL: uminv2i16_4: ; RV32: # %bb.0: -; RV32-NEXT: ucmple16 a2, a1, a0 -; RV32-NEXT: and a1, a1, a2 -; RV32-NEXT: addi a3, zero, -1 -; RV32-NEXT: xor a2, a2, a3 -; RV32-NEXT: and a0, a0, a2 -; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: umin16 a0, a0, a1 ; RV32-NEXT: ret ; ; RV64-LABEL: uminv2i16_4: ; RV64: # %bb.0: -; RV64-NEXT: ucmple16 a2, a1, a0 -; RV64-NEXT: and a1, a1, a2 -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: xor a2, a2, a3 -; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: umin16 a0, a0, a1 ; RV64-NEXT: ret %tmp1 = bitcast i32 %a to <2 x i16> %tmp2 = bitcast i32 %b to <2 x i16> @@ -1843,27 +1131,13 @@ define i64 @uminv4i16_1(i64 %a, i64 %b) nounwind { ; RV32-LABEL: uminv4i16_1: ; 
RV32: # %bb.0: -; RV32-NEXT: ucmplt16 a4, a0, a2 -; RV32-NEXT: ucmplt16 a5, a1, a3 -; RV32-NEXT: and a1, a1, a5 -; RV32-NEXT: addi a6, zero, -1 -; RV32-NEXT: xor a5, a5, a6 -; RV32-NEXT: and a3, a3, a5 -; RV32-NEXT: or a1, a1, a3 -; RV32-NEXT: and a0, a0, a4 -; RV32-NEXT: xor a3, a4, a6 -; RV32-NEXT: and a2, a2, a3 -; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: umin16 a1, a1, a3 +; RV32-NEXT: umin16 a0, a0, a2 ; RV32-NEXT: ret ; ; RV64-LABEL: uminv4i16_1: ; RV64: # %bb.0: -; RV64-NEXT: ucmplt16 a2, a0, a1 -; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: xor a2, a2, a3 -; RV64-NEXT: and a1, a1, a2 -; RV64-NEXT: or a0, a0, a1 +; RV64-NEXT: umin16 a0, a0, a1 ; RV64-NEXT: ret %tmp1 = bitcast i64 %a to <4 x i16> %tmp2 = bitcast i64 %b to <4 x i16> @@ -1876,27 +1150,13 @@ define i64 @uminv4i16_2(i64 %a, i64 %b) nounwind { ; RV32-LABEL: uminv4i16_2: ; RV32: # %bb.0: -; RV32-NEXT: ucmple16 a4, a0, a2 -; RV32-NEXT: ucmple16 a5, a1, a3 -; RV32-NEXT: and a1, a1, a5 -; RV32-NEXT: addi a6, zero, -1 -; RV32-NEXT: xor a5, a5, a6 -; RV32-NEXT: and a3, a3, a5 -; RV32-NEXT: or a1, a1, a3 -; RV32-NEXT: and a0, a0, a4 -; RV32-NEXT: xor a3, a4, a6 -; RV32-NEXT: and a2, a2, a3 -; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: umin16 a1, a1, a3 +; RV32-NEXT: umin16 a0, a0, a2 ; RV32-NEXT: ret ; ; RV64-LABEL: uminv4i16_2: ; RV64: # %bb.0: -; RV64-NEXT: ucmple16 a2, a0, a1 -; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: xor a2, a2, a3 -; RV64-NEXT: and a1, a1, a2 -; RV64-NEXT: or a0, a0, a1 +; RV64-NEXT: umin16 a0, a0, a1 ; RV64-NEXT: ret %tmp1 = bitcast i64 %a to <4 x i16> %tmp2 = bitcast i64 %b to <4 x i16> @@ -1909,27 +1169,13 @@ define i64 @uminv4i16_3(i64 %a, i64 %b) nounwind { ; RV32-LABEL: uminv4i16_3: ; RV32: # %bb.0: -; RV32-NEXT: ucmplt16 a4, a2, a0 -; RV32-NEXT: ucmplt16 a5, a3, a1 -; RV32-NEXT: and a3, a3, a5 -; RV32-NEXT: addi a6, zero, -1 -; RV32-NEXT: xor a5, a5, a6 -; RV32-NEXT: and a1, a1, a5 -; RV32-NEXT: or a1, a3, a1 -; RV32-NEXT: and a2, a2, a4 -; RV32-NEXT: xor a3, a4, a6 -; RV32-NEXT: and a0, a0, a3 -; RV32-NEXT: or a0, a2, a0 +; RV32-NEXT: umin16 a1, a1, a3 +; RV32-NEXT: umin16 a0, a0, a2 ; RV32-NEXT: ret ; ; RV64-LABEL: uminv4i16_3: ; RV64: # %bb.0: -; RV64-NEXT: ucmplt16 a2, a1, a0 -; RV64-NEXT: and a1, a1, a2 -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: xor a2, a2, a3 -; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: umin16 a0, a0, a1 ; RV64-NEXT: ret %tmp1 = bitcast i64 %a to <4 x i16> %tmp2 = bitcast i64 %b to <4 x i16> @@ -1942,27 +1188,13 @@ define i64 @uminv4i16_4(i64 %a, i64 %b) nounwind { ; RV32-LABEL: uminv4i16_4: ; RV32: # %bb.0: -; RV32-NEXT: ucmple16 a4, a2, a0 -; RV32-NEXT: ucmple16 a5, a3, a1 -; RV32-NEXT: and a3, a3, a5 -; RV32-NEXT: addi a6, zero, -1 -; RV32-NEXT: xor a5, a5, a6 -; RV32-NEXT: and a1, a1, a5 -; RV32-NEXT: or a1, a3, a1 -; RV32-NEXT: and a2, a2, a4 -; RV32-NEXT: xor a3, a4, a6 -; RV32-NEXT: and a0, a0, a3 -; RV32-NEXT: or a0, a2, a0 +; RV32-NEXT: umin16 a1, a1, a3 +; RV32-NEXT: umin16 a0, a0, a2 ; RV32-NEXT: ret ; ; RV64-LABEL: uminv4i16_4: ; RV64: # %bb.0: -; RV64-NEXT: ucmple16 a2, a1, a0 -; RV64-NEXT: and a1, a1, a2 -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: xor a2, a2, a3 -; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: umin16 a0, a0, a1 ; RV64-NEXT: ret %tmp1 = bitcast i64 %a to <4 x i16> %tmp2 = bitcast i64 %b to <4 x i16>