diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -198,18 +198,22 @@
 // llvm.vp.smin(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_smin, 2, 3, VP_SMIN, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_smin, VP_SMIN)

 // llvm.vp.smax(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_smax, 2, 3, VP_SMAX, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_smax, VP_SMAX)

 // llvm.vp.umin(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_umin, 2, 3, VP_UMIN, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_umin, VP_UMIN)

 // llvm.vp.umax(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_umax, 2, 3, VP_UMAX, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_umax, VP_UMAX)

 ///// } Integer Arithmetic
@@ -274,10 +278,12 @@
 // llvm.vp.minnum(x, y, mask,vlen)
 BEGIN_REGISTER_VP(vp_minnum, 2, 3, VP_FMINNUM, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_minnum, VP_FMINNUM)

 // llvm.vp.maxnum(x, y, mask,vlen)
 BEGIN_REGISTER_VP(vp_maxnum, 2, 3, VP_FMAXNUM, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_maxnum, VP_FMAXNUM)

 // llvm.vp.ceil(x,mask,vlen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -362,13 +362,9 @@
 define <256 x i8> @vmax_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) {
 ; CHECK-LABEL: vmax_vx_v258i8_evl128:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: vlm.v v24, (a1)
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
 ; CHECK-NEXT: ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -361,13 +361,9 @@
 define <256 x i8> @vmaxu_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) {
 ; CHECK-LABEL: vmaxu_vx_v258i8_evl128:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: vlm.v v24, (a1)
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
 ; CHECK-NEXT: ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -362,13 +362,9 @@
 define <256 x i8> @vmin_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) {
 ; CHECK-LABEL: vmin_vx_v258i8_evl128:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: vlm.v v24, (a1)
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
 ; CHECK-NEXT: ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -361,13 +361,9 @@
 define <256 x i8> @vminu_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) {
 ; CHECK-LABEL: vminu_vx_v258i8_evl128:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: vlm.v v24, (a1)
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
 ; CHECK-NEXT: ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -1150,19 +1150,27 @@
 ; for ISD::VSCALE.
 define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
-; CHECK-LABEL: vmax_vx_nxv32i32_evl_nx16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v24, v0, a2
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vmax_vx_nxv32i32_evl_nx16:
+; RV32: # %bb.0:
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vmax.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmax_vx_nxv32i32_evl_nx16:
+; RV64: # %bb.0:
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a2, a1, 2
+; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; RV64-NEXT: vslidedown.vx v24, v0, a2
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vmax.vx v16, v16, a0, v0.t
+; RV64-NEXT: ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -1149,19 +1149,27 @@
 ; for ISD::VSCALE.
 define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
-; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v24, v0, a2
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
+; RV32: # %bb.0:
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vmaxu.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
+; RV64: # %bb.0:
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a2, a1, 2
+; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; RV64-NEXT: vslidedown.vx v24, v0, a2
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vmaxu.vx v16, v16, a0, v0.t
+; RV64-NEXT: ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -1150,19 +1150,27 @@
 ; for ISD::VSCALE.
 define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
-; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v24, v0, a2
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vmin_vx_nxv32i32_evl_nx16:
+; RV32: # %bb.0:
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vmin.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmin_vx_nxv32i32_evl_nx16:
+; RV64: # %bb.0:
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a2, a1, 2
+; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; RV64-NEXT: vslidedown.vx v24, v0, a2
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vmin.vx v16, v16, a0, v0.t
+; RV64-NEXT: ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -1149,19 +1149,27 @@
 ; for ISD::VSCALE.
 define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
-; CHECK-LABEL: vminu_vx_nxv32i32_evl_nx16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v24, v0, a2
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vminu_vx_nxv32i32_evl_nx16:
+; RV32: # %bb.0:
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vminu.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vminu_vx_nxv32i32_evl_nx16:
+; RV64: # %bb.0:
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a2, a1, 2
+; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; RV64-NEXT: vslidedown.vx v24, v0, a2
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vminu.vx v16, v16, a0, v0.t
+; RV64-NEXT: ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()
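
Context, not part of the patch: VPIntrinsics.def is an X-macro header, so tagging vp.smin/smax/umin/umax/minnum/maxnum with VP_PROPERTY_BINARYOP feeds every file that includes the .def with that property macro defined; a generic binary-op predicate is what lets the DAG-level combine treat a VP binary op whose lanes are all disabled (zero EVL or all-false mask) as dead, which is why the upper-half vmax.vx/vminu.vx sequences and their mask setup disappear from the expected output above. The snippet below is only a self-contained sketch of that consumption pattern; the enum, function name, and macro signatures are simplified stand-ins, not LLVM's actual declarations.

// Sketch only: how a VP_PROPERTY_* line in the .def becomes a predicate at the
// include site. All names here are hypothetical; in LLVM the switch body would
// come from `#include "llvm/IR/VPIntrinsics.def"` instead of inline entries.
#include <cstdio>

enum VPOpcode { VP_SMIN, VP_SMAX, VP_UMIN, VP_UMAX, VP_FADD /* untagged in this sketch */ };

static bool isVPBinaryOpSketch(VPOpcode Op) {
  switch (Op) {
  default:
    break;
// Each registration opens a case label, the property expands to `return true;`,
// and the end-of-registration macro closes the case.
#define BEGIN_REGISTER_VP(INTRIN, MASKPOS, EVLPOS, SDOPC, LEGALPOS) case SDOPC:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP(INTRIN, SDOPC) break;
  BEGIN_REGISTER_VP(vp_smin, 2, 3, VP_SMIN, -1)
  VP_PROPERTY_BINARYOP
  END_REGISTER_VP(vp_smin, VP_SMIN)
  BEGIN_REGISTER_VP(vp_smax, 2, 3, VP_SMAX, -1)
  VP_PROPERTY_BINARYOP
  END_REGISTER_VP(vp_smax, VP_SMAX)
#undef BEGIN_REGISTER_VP
#undef VP_PROPERTY_BINARYOP
#undef END_REGISTER_VP
  }
  return false;
}

int main() {
  // A combiner that knows an opcode is a plain binary VP op can discard it when
  // every element is disabled, mirroring the test changes above.
  std::printf("VP_SMAX tagged as binary op: %d\n", isVPBinaryOpSketch(VP_SMAX));
  std::printf("VP_FADD (untagged in this sketch): %d\n", isVPBinaryOpSketch(VP_FADD));
  return 0;
}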