diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -975,7 +975,11 @@ if (Subtarget.hasVInstructions()) setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER, ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL, - ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR}); + ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR, ISD::MUL, + ISD::UDIV, ISD::SDIV}); + if (Subtarget.hasVInstructions() && !Subtarget.hasStdExtZbb()) + setTargetDAGCombine({ISD::UMIN, ISD::UMAX}); + if (Subtarget.useRVVForFixedLengthVectors()) setTargetDAGCombine(ISD::BITCAST); @@ -8262,8 +8266,93 @@ return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT)); } +// fold op(a, select(mask, b, imm)) -> op_mask(a, b, mask) if op(a, imm) == a. +// TODO: Support FP ops. +static SDValue performVBinMaskCombine(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { + EVT VT = N->getValueType(0); + + if (!VT.isVector()) + return SDValue(); + + // TODO: Support fixed vector. 
+ if (VT.isFixedLengthVector()) + return SDValue(); + + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + + unsigned RISCVVLISDOpc; + int64_t Imm; + bool IsCommutable; + +#define OP_CASE(OP, IDENTIFY_VALUE, IS_COMMUTABLE) \ + case ISD::OP: \ + RISCVVLISDOpc = RISCVISD::OP##_VL; \ + Imm = IDENTIFY_VALUE; \ + IsCommutable = IS_COMMUTABLE; \ + break; + + switch (N->getOpcode()) { + OP_CASE(ADD, 0, true) + OP_CASE(SUB, 0, false) + OP_CASE(MUL, 1, true) + OP_CASE(UDIV, 1, false) + OP_CASE(SDIV, 1, false) + OP_CASE(AND, -1, true) + OP_CASE(OR, 0, true) + OP_CASE(XOR, 0, true) + OP_CASE(SHL, 0, false) + OP_CASE(SRL, 0, false) + OP_CASE(SRA, 0, false) + OP_CASE(UMIN, -1, true) + OP_CASE(UMAX, 0, true) + default: + return SDValue(); + } + +#undef OP_CASE + + if (N1.getOpcode() != ISD::VSELECT) { + if (!IsCommutable || N0.getOpcode() != ISD::VSELECT) + return SDValue(); + std::swap(N0, N1); + } + + if (!N1.hasOneUse()) + return SDValue(); + + SDValue Mask = N1->getOperand(0); + SDValue TrueVal = N1->getOperand(1); + SDValue FalseVal = N1->getOperand(2); + + int64_t SplatImm; + + if (FalseVal.getOpcode() == ISD::SPLAT_VECTOR && + isa<ConstantSDNode>(FalseVal.getOperand(0))) + SplatImm = cast<ConstantSDNode>(FalseVal.getOperand(0))->getSExtValue(); + else if (FalseVal.getOpcode() == RISCVISD::VMV_V_X_VL && + FalseVal.getOperand(0).isUndef() && + isa<ConstantSDNode>(FalseVal.getOperand(1))) + SplatImm = cast<ConstantSDNode>(FalseVal.getOperand(1))->getSExtValue(); + else + return SDValue(); + + + + if (SplatImm != Imm) + return SDValue(); + + SDValue VL = DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()); + + return DAG.getNode(RISCVVLISDOpc, SDLoc(N), VT, N0, TrueVal, N0, Mask, VL); +} + static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { + if (SDValue V = performVBinMaskCombine(N, DAG, Subtarget)) + return V; + if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget)) return V; if (SDValue V = transformAddShlImm(N, DAG, Subtarget)) @@ -8275,7 +8364,11 @@ return
combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false); } -static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) { +static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { + if (SDValue V = performVBinMaskCombine(N, DAG, Subtarget)) + return V; + SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); @@ -8307,6 +8400,9 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { + if (SDValue V = performVBinMaskCombine(N, DAG, Subtarget)) + return V; + SDValue N0 = N->getOperand(0); // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero // extending X. This is safe since we only need the LSB after the shift and @@ -8336,6 +8432,9 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { + if (SDValue V = performVBinMaskCombine(N, DAG, Subtarget)) + return V; + if (Subtarget.hasStdExtZbp()) { if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget)) return GREV; @@ -8352,7 +8451,11 @@ return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false); } -static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) { +static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { + if (SDValue V = performVBinMaskCombine(N, DAG, Subtarget)) + return V; + SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); @@ -8865,6 +8968,9 @@ const RISCVSubtarget &Subtarget) { assert(N->getOpcode() == ISD::SRA && "Unexpected opcode"); + if (SDValue V = performVBinMaskCombine(N, DAG, Subtarget)) + return V; + if (N->getValueType(0) != MVT::i64 || !Subtarget.is64Bit()) return SDValue(); @@ -9237,16 +9343,23 @@ case ISD::ADD: return performADDCombine(N, DAG, Subtarget); case ISD::SUB: - return performSUBCombine(N, DAG); + return performSUBCombine(N, DAG, Subtarget); + case ISD::MUL: + case ISD::UDIV: + case ISD::SDIV: + return performVBinMaskCombine(N, DAG, Subtarget); case 
ISD::AND: return performANDCombine(N, DAG, Subtarget); case ISD::OR: return performORCombine(N, DAG, Subtarget); case ISD::XOR: - return performXORCombine(N, DAG); - case ISD::FADD: + return performXORCombine(N, DAG, Subtarget); case ISD::UMAX: case ISD::UMIN: + if (SDValue V = performVBinMaskCombine(N, DAG, Subtarget)) + return V; + [[fallthrough]]; + case ISD::FADD: case ISD::SMAX: case ISD::SMIN: case ISD::FMAXNUM: @@ -9443,6 +9556,8 @@ [[fallthrough]]; case ISD::SRL: case ISD::SHL: { + if (SDValue V = performVBinMaskCombine(N, DAG, Subtarget)) + return V; SDValue ShAmt = N->getOperand(1); if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) { // We don't need the upper 32 bits of a 64-bit element for a shift amount. diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll @@ -890,3 +890,40 @@ %v = add %splat1, %splat2 ret %v } + +define @vadd_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vadd_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %vs = select %mask, %vb, zeroinitializer + %vc = add %va, %vs + ret %vc +} + +define @vadd_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vadd_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = add %va, %vs + ret %vc +} + +define @vadd_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vadd_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vadd.vi v8, v8, 7, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 7, i32 0 + %splat = shufflevector %head, poison, 
zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = add %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll @@ -1381,3 +1381,46 @@ %v = and %splat1, %splat2 ret %v } + +define @vand_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vand_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %allones = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %vb, %allones + %vc = and %va, %vs + ret %vc +} + +define @vand_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vand_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head1 = insertelement poison, i32 -1, i32 0 + %allones = shufflevector %head1, poison, zeroinitializer + %head2 = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head2, poison, zeroinitializer + %vs = select %mask, %splat, %allones + %vc = and %va, %vs + ret %vc +} + +define @vand_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vand_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vand.vi v8, v8, 7, v0.t +; CHECK-NEXT: ret + %head1 = insertelement poison, i32 -1, i32 0 + %allones = shufflevector %head1, poison, zeroinitializer + %head2 = insertelement poison, i32 7, i32 0 + %splat = shufflevector %head2, poison, zeroinitializer + %vs = select %mask, %splat, %allones + %vc = and %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll @@ -1184,3 +1184,47 @@ %vc = sdiv %va, %splat ret 
%vc } + +define @vdiv_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vdiv_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 1, i32 0 + %one = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %vb, %one + %vc = sdiv %va, %vs + ret %vc +} + +define @vdiv_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vdiv_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head1 = insertelement poison, i32 1, i32 0 + %one = shufflevector %head1, poison, zeroinitializer + %head2 = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head2, poison, zeroinitializer + %vs = select %mask, %splat, %one + %vc = sdiv %va, %vs + ret %vc +} + +define @vdiv_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vdiv_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head1 = insertelement poison, i32 1, i32 0 + %one = shufflevector %head1, poison, zeroinitializer + %head2 = insertelement poison, i32 7, i32 0 + %splat = shufflevector %head2, poison, zeroinitializer + %vs = select %mask, %splat, %one + %vc = sdiv %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll @@ -1197,3 +1197,47 @@ %vd = udiv %va, %vc ret %vd } + +define @vdivu_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vdivu_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 1, i32 0 + %one = shufflevector %head, poison, zeroinitializer + %vs = select 
%mask, %vb, %one + %vc = udiv %va, %vs + ret %vc +} + +define @vdivu_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vdivu_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head1 = insertelement poison, i32 1, i32 0 + %one = shufflevector %head1, poison, zeroinitializer + %head2 = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head2, poison, zeroinitializer + %vs = select %mask, %splat, %one + %vc = udiv %va, %vs + ret %vc +} + +define @vdivu_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vdivu_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head1 = insertelement poison, i32 1, i32 0 + %one = shufflevector %head1, poison, zeroinitializer + %head2 = insertelement poison, i32 7, i32 0 + %splat = shufflevector %head2, poison, zeroinitializer + %vs = select %mask, %splat, %one + %vc = udiv %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll @@ -890,3 +890,43 @@ ret %vc } +define @vmax_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vmax_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %vs = select %mask, %vb, zeroinitializer + %cmp = icmp ugt %va, %vs + %vc = select %cmp, %va, %vs + ret %vc +} + +define @vmax_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vmax_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, 
zeroinitializer + %cmp = icmp ugt %va, %vs + %vc = select %cmp, %va, %vs + ret %vc +} + +define @vmax_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vmax_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -3 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 -3, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %cmp = icmp ugt %va, %vs + %vc = select %cmp, %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll @@ -890,3 +890,49 @@ ret %vc } +define @vmin_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vmin_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vminu.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %max = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %vb, %max + %cmp = icmp ult %va, %vs + %vc = select %cmp, %va, %vs + ret %vc +} + +define @vmin_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vmin_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head0 = insertelement poison, i32 -1, i32 0 + %max = shufflevector %head0, poison, zeroinitializer + %head1 = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head1, poison, zeroinitializer + %vs = select %mask, %splat, %max + %cmp = icmp ult %va, %vs + %vc = select %cmp, %va, %vs + ret %vc +} + +define @vmin_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vmin_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -3 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head0 = insertelement 
poison, i32 -1, i32 0 + %max = shufflevector %head0, poison, zeroinitializer + %head1 = insertelement poison, i32 -3, i32 0 + %splat = shufflevector %head1, poison, zeroinitializer + %vs = select %mask, %splat, %max + %cmp = icmp ult %va, %vs + %vc = select %cmp, %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll @@ -959,3 +959,48 @@ %v = mul %splat1, %splat2 ret %v } + +define @vmul_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vmul_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 1, i32 0 + %one = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %vb, %one + %vc = mul %va, %vs + ret %vc +} + +define @vmul_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vmul_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head1 = insertelement poison, i32 1, i32 0 + %one = shufflevector %head1, poison, zeroinitializer + %head2 = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head2, poison, zeroinitializer + %vs = select %mask, %splat, %one + %vc = mul %va, %vs + ret %vc +} + +define @vmul_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vmul_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head1 = insertelement poison, i32 1, i32 0 + %one = shufflevector %head1, poison, zeroinitializer + %head2 = insertelement poison, i32 7, i32 0 + %splat = shufflevector %head2, poison, zeroinitializer + %vs = select %mask, %splat, %one + %vc = mul %va, %vs + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll @@ -1174,3 +1174,40 @@ %v = or %splat1, %splat2 ret %v } + +define @vor_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vor_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %vs = select %mask, %vb, zeroinitializer + %vc = or %va, %vs + ret %vc +} + +define @vor_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vor_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = or %va, %vs + ret %vc +} + +define @vor_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vor_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vor.vi v8, v8, 7, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 7, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = or %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll @@ -1234,4 +1234,3 @@ %vc = srem %va, %splat ret %vc } - diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll @@ -629,3 +629,40 @@ %vc = shl %va, %splat ret %vc } + +define @vshl_vv_mask_nxv4i32( %va, %vb, %mask) { +; CHECK-LABEL: vshl_vv_mask_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; 
CHECK-NEXT: ret + %vs = select %mask, %vb, zeroinitializer + %vc = shl %va, %vs + ret %vc +} + +define @vshl_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vshl_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = shl %va, %vs + ret %vc +} + +define @vshl_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vshl_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsll.vi v8, v8, 31, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 31, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = shl %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll @@ -802,3 +802,39 @@ ret %vc } +define @vsra_vv_mask_nxv4i32( %va, %vb, %mask) { +; CHECK-LABEL: vsra_vv_mask_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %vs = select %mask, %vb, zeroinitializer + %vc = ashr %va, %vs + ret %vc +} + +define @vsra_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vsra_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = ashr %va, %vs + ret %vc +} + +define @vsra_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vsra_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: 
vsra.vi v8, v8, 31, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 31, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = ashr %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll @@ -582,3 +582,39 @@ ret %vc } +define @vsrl_vv_mask_nxv4i32( %va, %vb, %mask) { +; CHECK-LABEL: vsrl_vv_mask_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %vs = select %mask, %vb, zeroinitializer + %vc = lshr %va, %vs + ret %vc +} + +define @vsrl_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vsrl_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = lshr %va, %vs + ret %vc +} + +define @vsrl_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vsrl_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsrl.vi v8, v8, 31, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 31, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = lshr %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll @@ -868,3 +868,42 @@ %v = sub %splat1, %splat2 ret %v } + +define @vsub_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vsub_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsub.vv v8, 
v8, v12, v0.t +; CHECK-NEXT: ret + + %vs = select %mask, %vb, zeroinitializer + %vc = sub %va, %vs + ret %vc +} + +define @vsub_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vsub_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = sub %va, %vs + ret %vc +} + +define @vsub_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vsub_vi_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 7, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = sub %va, %vs + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll @@ -1381,3 +1381,40 @@ %v = xor %splat1, %splat2 ret %v } + +define @vxor_vv_mask_nxv8i32( %va, %vb, %mask) { +; CHECK-LABEL: vxor_vv_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %vs = select %mask, %vb, zeroinitializer + %vc = xor %va, %vs + ret %vc +} + +define @vxor_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { +; CHECK-LABEL: vxor_vx_mask_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = xor %va, %vs + ret %vc +} + +define @vxor_vi_mask_nxv8i32( %va, %mask) { +; CHECK-LABEL: vxor_vi_mask_nxv8i32: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i32 7, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vs = select %mask, %splat, zeroinitializer + %vc = xor %va, %vs + ret %vc +}