diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -71,6 +71,24 @@
   }
 }
 
+static unsigned getRegClassIDForLMUL(RISCVVLMUL LMul) {
+  switch (LMul) {
+  default:
+    llvm_unreachable("Invalid LMUL.");
+  case RISCVVLMUL::LMUL_F8:
+  case RISCVVLMUL::LMUL_F4:
+  case RISCVVLMUL::LMUL_F2:
+  case RISCVVLMUL::LMUL_1:
+    return RISCV::VRRegClassID;
+  case RISCVVLMUL::LMUL_2:
+    return RISCV::VRM2RegClassID;
+  case RISCVVLMUL::LMUL_4:
+    return RISCV::VRM4RegClassID;
+  case RISCVVLMUL::LMUL_8:
+    return RISCV::VRM8RegClassID;
+  }
+}
+
 static unsigned getSubregIndexByMVT(MVT VT, unsigned Index) {
   RISCVVLMUL LMUL = getLMUL(VT);
   if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 ||
@@ -823,6 +841,48 @@
     }
     break;
   }
+  case ISD::INSERT_SUBVECTOR: {
+    // Bail when not a "cast" like insert_subvector.
+    if (Node->getConstantOperandVal(2) != 0)
+      break;
+    if (!Node->getOperand(0).isUndef())
+      break;
+
+    // Bail when normal isel should do the job.
+    EVT InVT = Node->getOperand(1).getValueType();
+    if (VT.isFixedLengthVector() || InVT.isScalableVector())
+      break;
+
+    SDValue V = Node->getOperand(1);
+    SDLoc DL(V);
+    unsigned RegClassID = getRegClassIDForLMUL(getLMUL(VT));
+    SDValue RC =
+        CurDAG->getTargetConstant(RegClassID, DL, Subtarget->getXLenVT());
+    SDNode *NewNode =
+        CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
+    ReplaceNode(Node, NewNode);
+    return;
+  }
+  case ISD::EXTRACT_SUBVECTOR: {
+    // Bail when not a "cast" like extract_subvector.
+    if (Node->getConstantOperandVal(1) != 0)
+      break;
+
+    // Bail when normal isel can do the job.
+    EVT InVT = Node->getOperand(0).getValueType();
+    if (VT.isScalableVector() || InVT.isFixedLengthVector())
+      break;
+
+    SDValue V = Node->getOperand(0);
+    SDLoc DL(V);
+    unsigned RegClassID = getRegClassIDForLMUL(getLMUL(InVT.getSimpleVT()));
+    SDValue RC =
+        CurDAG->getTargetConstant(RegClassID, DL, Subtarget->getXLenVT());
+    SDNode *NewNode =
+        CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
+    ReplaceNode(Node, NewNode);
+    return;
+  }
   }
 
   // Select the default instruction.
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -3435,3 +3435,57 @@
   store <4 x i64> %c, <4 x i64>* %x
   ret void
 }
+
+define void @extract_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
+; LMULMAX2-LABEL: extract_v4i64:
+; LMULMAX2:       # %bb.0:
+; LMULMAX2-NEXT:    addi a2, zero, 4
+; LMULMAX2-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vle64.v v26, (a0)
+; LMULMAX2-NEXT:    vle64.v v28, (a1)
+; LMULMAX2-NEXT:    vsetvli a1, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vadd.vv v26, v26, v28
+; LMULMAX2-NEXT:    vse64.v v26, (a0)
+; LMULMAX2-NEXT:    ret
+;
+; LMULMAX1-RV32-LABEL: extract_v4i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
+; LMULMAX1-RV32-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    addi a3, a0, 16
+; LMULMAX1-RV32-NEXT:    vle64.v v26, (a3)
+; LMULMAX1-RV32-NEXT:    vle64.v v27, (a1)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, 16
+; LMULMAX1-RV32-NEXT:    vle64.v v28, (a1)
+; LMULMAX1-RV32-NEXT:    vsetvli a1, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vadd.vv v26, v26, v28
+; LMULMAX1-RV32-NEXT:    vadd.vv v25, v25, v27
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    vse64.v v26, (a3)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: extract_v4i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
+; LMULMAX1-RV64-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    addi a3, a0, 16
+; LMULMAX1-RV64-NEXT:    vle64.v v26, (a3)
+; LMULMAX1-RV64-NEXT:    vle64.v v27, (a1)
+; LMULMAX1-RV64-NEXT:    addi a1, a1, 16
+; LMULMAX1-RV64-NEXT:    vle64.v v28, (a1)
+; LMULMAX1-RV64-NEXT:    vsetvli a1, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vadd.vv v26, v26, v28
+; LMULMAX1-RV64-NEXT:    vadd.vv v25, v25, v27
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vse64.v v26, (a3)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <4 x i64>, <4 x i64>* %x
+  %b = load <4 x i64>, <4 x i64>* %y
+  br label %"compute"
+"compute":
+  %c = add <4 x i64> %a, %b
+  store <4 x i64> %c, <4 x i64>* %x
+  ret void
+}