diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -995,7 +995,7 @@
   case ISD::FLOG10:
   case ISD::FLOG2:
   case ISD::FNEARBYINT:
-  case ISD::FNEG:
+  case ISD::FNEG: case ISD::VP_FNEG:
   case ISD::FREEZE:
   case ISD::ARITH_FENCE:
   case ISD::FP_EXTEND:
@@ -2069,15 +2069,33 @@
   else
     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);

-  if (N->getOpcode() == ISD::FP_ROUND) {
-    Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo, N->getOperand(1),
-                     N->getFlags());
-    Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi, N->getOperand(1),
-                     N->getFlags());
-  } else {
-    Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo, N->getFlags());
-    Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi, N->getFlags());
+  const SDNodeFlags Flags = N->getFlags();
+  unsigned Opcode = N->getOpcode();
+  if (N->getNumOperands() <= 2) {
+    if (Opcode == ISD::FP_ROUND) {
+      Lo = DAG.getNode(Opcode, dl, LoVT, Lo, N->getOperand(1), Flags);
+      Hi = DAG.getNode(Opcode, dl, HiVT, Hi, N->getOperand(1), Flags);
+    } else {
+      Lo = DAG.getNode(Opcode, dl, LoVT, Lo, Flags);
+      Hi = DAG.getNode(Opcode, dl, HiVT, Hi, Flags);
+    }
+    return;
   }
+
+  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
+  assert(N->isVPOpcode() && "Expected VP opcode");
+
+  SDValue MaskLo, MaskHi;
+  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
+
+  SDValue EVLLo, EVLHi;
+  std::tie(EVLLo, EVLHi) =
+      DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
+
+  Lo = DAG.getNode(Opcode, dl, Lo.getValueType(),
+                   {Lo, MaskLo, EVLLo}, Flags);
+  Hi = DAG.getNode(Opcode, dl, Hi.getValueType(),
+                   {Hi, MaskHi, EVLHi}, Flags);
 }

 void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
@@ -3417,7 +3435,7 @@
   case ISD::CTPOP:
   case ISD::CTTZ:
   case ISD::CTTZ_ZERO_UNDEF:
-  case ISD::FNEG:
+  case ISD::FNEG: case ISD::VP_FNEG:
   case ISD::FREEZE:
   case ISD::ARITH_FENCE:
   case ISD::FCANONICALIZE:
@@ -4028,7 +4046,16 @@
   // Unary op widening.
   EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
   SDValue InOp = GetWidenedVector(N->getOperand(0));
-  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp);
+  if (N->getNumOperands() == 1)
+    return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp);
+
+  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
+  assert(N->isVPOpcode() && "Expected VP opcode");
+
+  SDValue Mask =
+      GetWidenedMask(N->getOperand(1), WidenVT.getVectorElementCount());
+  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT,
+                     {InOp, Mask, N->getOperand(2)});
 }

 SDValue DAGTypeLegalizer::WidenVecRes_InregOp(SDNode *N) {
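[Editorial note, not part of the patch] The new SplitVecRes path above is the interesting piece: a VP unary op is split by cutting the data vector, the mask, and the explicit vector length (EVL) independently. Below is a minimal standalone C++ model of that rule, assuming DAG.SplitEVL gives the low half min(EVL, N) lanes and the high half the remainder (which matches the branchy sequences in the tests that follow); splitEVL, applyVPFNeg, and vpFNegSplit are illustrative names, not LLVM APIs.

  #include <algorithm>
  #include <cstdint>
  #include <utility>
  #include <vector>

  // Assumed semantics of DAG.SplitEVL: the low half processes min(EVL, N)
  // lanes, the high half gets whatever remains (saturating at zero).
  static std::pair<uint32_t, uint32_t> splitEVL(uint32_t EVL, uint32_t N) {
    return {std::min(EVL, N), EVL > N ? EVL - N : 0};
  }

  // vp.fneg on one already-legal half: negate lanes that are below EVL and
  // enabled by the mask; all other lanes are unspecified (left as-is here).
  static void applyVPFNeg(std::vector<double> &V, const std::vector<bool> &M,
                          uint32_t EVL) {
    for (uint32_t I = 0; I < EVL; ++I)
      if (M[I])
        V[I] = -V[I];
  }

  // Split a 2*N-lane vp.fneg into two N-lane ops, mirroring the DAG code:
  // data, mask, and EVL are each divided, then the op runs on each half.
  void vpFNegSplit(std::vector<double> &Vec, const std::vector<bool> &Mask,
                   uint32_t EVL) {
    uint32_t N = Vec.size() / 2;
    auto [EVLLo, EVLHi] = splitEVL(EVL, N);
    std::vector<double> Lo(Vec.begin(), Vec.begin() + N);
    std::vector<double> Hi(Vec.begin() + N, Vec.end());
    std::vector<bool> MLo(Mask.begin(), Mask.begin() + N);
    std::vector<bool> MHi(Mask.begin() + N, Mask.end());
    applyVPFNeg(Lo, MLo, EVLLo);
    applyVPFNeg(Hi, MHi, EVLHi);
    std::copy(Lo.begin(), Lo.end(), Vec.begin());
    std::copy(Hi.begin(), Hi.end(), Vec.begin() + N);
  }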
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -268,6 +268,30 @@
   ret <8 x double> %v
 }

+declare <15 x double> @llvm.vp.fneg.v15f64(<15 x double>, <15 x i1>, i32)
+
+define <15 x double> @vfneg_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_v15f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfneg.v v8, v8, v0.t
+; CHECK-NEXT: ret
+  %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  ret <15 x double> %v
+}
+
+define <15 x double> @vfneg_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_v15f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+  %head = insertelement <15 x i1> poison, i1 true, i32 0
+  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
+  %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  ret <15 x double> %v
+}
+
 declare <16 x double> @llvm.vp.fneg.v16f64(<16 x double>, <16 x i1>, i32)

 define <16 x double> @vfneg_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
@@ -291,3 +315,57 @@
   %v = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
+
+declare <32 x double> @llvm.vp.fneg.v32f64(<32 x double>, <32 x i1>, i32)
+
+define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_v32f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT: addi a2, a0, -16
+; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: bltu a0, a2, .LBB26_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vfneg.v v16, v16, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB26_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB26_4:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfneg.v v8, v8, v0.t
+; CHECK-NEXT: ret
+  %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x double> %v
+}
+
+define <32 x double> @vfneg_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_v32f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -16
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: bltu a0, a1, .LBB27_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: .LBB27_2:
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vfsgnjn.vv v16, v16, v16
+; CHECK-NEXT: bltu a0, a1, .LBB27_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB27_4:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+  %head = insertelement <32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+  %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x double> %v
+}
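[Editorial note, not part of the patch] The v32f64 checks above are the splitting rule made concrete. With 16 doubles per m8 half, the low op gets min(%evl, 16) lanes and the high op the remainder: `addi a2, a0, -16` plus the unsigned `bltu a0, a2, ...` implements a saturating %evl - 16 (when %evl < 16 the subtraction wraps above %evl, so the branch keeps the 0 loaded by `li`), and the later `li a1, 16` / `bltu a0, a1, ...` clamps the low EVL to 16. For example, %evl = 20 runs 16 lanes through one vfneg/vfsgnjn and 4 through the other, while %evl = 9 runs 9 and 0.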
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -340,6 +340,30 @@
   ret <vscale x 4 x double> %v
 }

+declare <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfneg_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfneg.v v8, v8, v0.t
+; CHECK-NEXT: ret
+  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x double> %v
+}
+
+define <vscale x 7 x double> @vfneg_vv_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_nxv7f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

 define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -363,3 +387,60 @@
   %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
+
+; Test splitting.
+declare <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_nxv16f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a4, a1, 3
+; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
+; CHECK-NEXT: sub a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a4
+; CHECK-NEXT: bltu a0, a3, .LBB32_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vfneg.v v16, v16, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB32_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB32_4:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfneg.v v8, v8, v0.t
+; CHECK-NEXT: ret
+  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x double> %v
+}
+
+define <vscale x 16 x double> @vfneg_vv_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_nxv16f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: bltu a0, a1, .LBB33_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: .LBB33_2:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: sub a1, a0, a1
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: bltu a0, a1, .LBB33_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a3, a1
+; CHECK-NEXT: .LBB33_4:
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v16, v16, v16
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x double> %v
+}
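[Editorial note, not part of the patch] The nxv7f64 tests exercise the widening path instead: <vscale x 7 x double> is not a legal type, so the operand is widened to <vscale x 8 x double>, the mask is widened via GetWidenedMask, and the EVL operand is reused unchanged. Because a well-defined vp.fneg only produces results in the first %evl enabled lanes, the padding lanes are don't-cares and the whole thing collapses to a single vsetvli + vfneg, as the checks show. A tiny model of that rule, under the same assumptions and hypothetical names as the earlier sketch:

  #include <vector>

  struct VPUnary {
    std::vector<double> Data; // operand lanes
    std::vector<bool> Mask;   // per-lane enable
    unsigned EVL;             // explicit vector length
  };

  // Widen a VP unary op to WideLen lanes: pad data and mask out to the wider
  // type. In the DAG the new lanes are undef; they are modeled as zero/false
  // here. EVL is reused as-is, so a well-defined op never reads the padding.
  VPUnary widen(const VPUnary &Op, unsigned WideLen) {
    VPUnary W = Op;
    W.Data.resize(WideLen, 0.0);
    W.Mask.resize(WideLen, false);
    return W;
  }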