diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -45,11 +45,12 @@
   bool SelectAddrFI(SDValue Addr, SDValue &Base);
 
-  bool SelectSLOI(SDValue N, SDValue &RS1, SDValue &Shamt);
-  bool SelectSROI(SDValue N, SDValue &RS1, SDValue &Shamt);
-  bool SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt);
-  bool SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt);
-  bool SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt);
+  bool MatchSRLIW(SDNode *N) const;
+  bool MatchSLOI(SDNode *N) const;
+  bool MatchSROI(SDNode *N) const;
+  bool MatchSROIW(SDNode *N) const;
+  bool MatchSLLIUW(SDNode *N) const;
+
   bool selectVSplat(SDValue N, SDValue &SplatVal);
   bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
   bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -49,17 +49,6 @@
   return Result;
 }
 
-// Returns true if the Node is an ISD::AND with a constant argument. If so,
-// set Mask to that constant value.
-static bool isConstantMask(SDNode *Node, uint64_t &Mask) {
-  if (Node->getOpcode() == ISD::AND &&
-      Node->getOperand(1).getOpcode() == ISD::Constant) {
-    Mask = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
-    return true;
-  }
-  return false;
-}
-
 void RISCVDAGToDAGISel::Select(SDNode *Node) {
   // If we have a custom node, we have already selected.
   if (Node->isMachineOpcode()) {
@@ -121,27 +110,6 @@
     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
     return;
   }
-  case ISD::SRL: {
-    if (!Subtarget->is64Bit())
-      break;
-    SDNode *Op0 = Node->getOperand(0).getNode();
-    uint64_t Mask;
-    // Match (srl (and val, mask), imm) where the result would be a
-    // zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
-    // is equivalent to this (SimplifyDemandedBits may have removed lower bits
-    // from the mask that aren't necessary due to the right-shifting).
-    if (isa<ConstantSDNode>(Node->getOperand(1)) && isConstantMask(Op0, Mask)) {
-      uint64_t ShAmt = Node->getConstantOperandVal(1);
-
-      if ((Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff) {
-        SDValue ShAmtVal = CurDAG->getTargetConstant(ShAmt, DL, XLenVT);
-        CurDAG->SelectNodeTo(Node, RISCV::SRLIW, XLenVT, Op0->getOperand(0),
-                             ShAmtVal);
-        return;
-      }
-    }
-    break;
-  }
   case ISD::INTRINSIC_W_CHAIN: {
     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
     switch (IntNo) {
@@ -238,198 +206,147 @@
   return false;
 }
 
-// Check that it is a SLOI (Shift Left Ones Immediate). We first check that
-// it is the right node tree:
+// Match (srl (and val, mask), imm) where the result would be a
+// zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
+// is equivalent to this (SimplifyDemandedBits may have removed lower bits
+// from the mask that aren't necessary due to the right-shifting).
+bool RISCVDAGToDAGISel::MatchSRLIW(SDNode *N) const {
+  assert(N->getOpcode() == ISD::SRL);
+  assert(N->getOperand(0).getOpcode() == ISD::AND);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  // The IsRV64 predicate is checked after PatFrag predicates so we can get
+  // here even on RV32.
+  if (!Subtarget->is64Bit())
+    return false;
+
+  SDValue And = N->getOperand(0);
+  uint64_t ShAmt = N->getConstantOperandVal(1);
+  uint64_t Mask = And.getConstantOperandVal(1);
+  return (Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff;
+}
+
+// Check that it is a SLOI (Shift Left Ones Immediate). A PatFrag has already
+// determined it has the right structure:
 //
 // (OR (SHL RS1, VC2), VC1)
 //
-// and then we check that VC1, the mask used to fill with ones, is compatible
+// Check that VC1, the mask used to fill with ones, is compatible
 // with VC2, the shamt:
 //
-// VC1 == maskTrailingOnes<uint64_t>(VC2)
-
-bool RISCVDAGToDAGISel::SelectSLOI(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  MVT XLenVT = Subtarget->getXLenVT();
-  if (N.getOpcode() == ISD::OR) {
-    SDValue Or = N;
-    if (Or.getOperand(0).getOpcode() == ISD::SHL) {
-      SDValue Shl = Or.getOperand(0);
-      if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
-          isa<ConstantSDNode>(Or.getOperand(1))) {
-        if (XLenVT == MVT::i64) {
-          uint64_t VC1 = Or.getConstantOperandVal(1);
-          uint64_t VC2 = Shl.getConstantOperandVal(1);
-          if (VC1 == maskTrailingOnes<uint64_t>(VC2)) {
-            RS1 = Shl.getOperand(0);
-            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                              Shl.getOperand(1).getValueType());
-            return true;
-          }
-        }
-        if (XLenVT == MVT::i32) {
-          uint32_t VC1 = Or.getConstantOperandVal(1);
-          uint32_t VC2 = Shl.getConstantOperandVal(1);
-          if (VC1 == maskTrailingOnes<uint32_t>(VC2)) {
-            RS1 = Shl.getOperand(0);
-            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                              Shl.getOperand(1).getValueType());
-            return true;
-          }
-        }
-      }
-    }
+// VC1 == maskTrailingOnes<uint64_t>(VC2)
+//
+bool RISCVDAGToDAGISel::MatchSLOI(SDNode *N) const {
+  assert(N->getOpcode() == ISD::OR);
+  assert(N->getOperand(0).getOpcode() == ISD::SHL);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  SDValue Shl = N->getOperand(0);
+  if (Subtarget->is64Bit()) {
+    uint64_t VC1 = N->getConstantOperandVal(1);
+    uint64_t VC2 = Shl.getConstantOperandVal(1);
+    return VC1 == maskTrailingOnes<uint64_t>(VC2);
   }
-  return false;
+
+  uint32_t VC1 = N->getConstantOperandVal(1);
+  uint32_t VC2 = Shl.getConstantOperandVal(1);
+  return VC1 == maskTrailingOnes<uint32_t>(VC2);
 }
 
-// Check that it is a SROI (Shift Right Ones Immediate). We first check that
-// it is the right node tree:
+// Check that it is a SROI (Shift Right Ones Immediate). A PatFrag has already
+// determined it has the right structure:
 //
 // (OR (SRL RS1, VC2), VC1)
 //
-// and then we check that VC1, the mask used to fill with ones, is compatible
+// Check that VC1, the mask used to fill with ones, is compatible
 // with VC2, the shamt:
 //
-// VC1 == maskLeadingOnes<uint64_t>(VC2)
-
-bool RISCVDAGToDAGISel::SelectSROI(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  MVT XLenVT = Subtarget->getXLenVT();
-  if (N.getOpcode() == ISD::OR) {
-    SDValue Or = N;
-    if (Or.getOperand(0).getOpcode() == ISD::SRL) {
-      SDValue Srl = Or.getOperand(0);
-      if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
-          isa<ConstantSDNode>(Or.getOperand(1))) {
-        if (XLenVT == MVT::i64) {
-          uint64_t VC1 = Or.getConstantOperandVal(1);
-          uint64_t VC2 = Srl.getConstantOperandVal(1);
-          if (VC1 == maskLeadingOnes<uint64_t>(VC2)) {
-            RS1 = Srl.getOperand(0);
-            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                              Srl.getOperand(1).getValueType());
-            return true;
-          }
-        }
-        if (XLenVT == MVT::i32) {
-          uint32_t VC1 = Or.getConstantOperandVal(1);
-          uint32_t VC2 = Srl.getConstantOperandVal(1);
-          if (VC1 == maskLeadingOnes<uint32_t>(VC2)) {
-            RS1 = Srl.getOperand(0);
-            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                              Srl.getOperand(1).getValueType());
-            return true;
-          }
-        }
-      }
-    }
-  }
-  return false;
-}
-
-// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
-// on RV64).
-// SLLIUW is the same as SLLI except for the fact that it clears the bits
-// XLEN-1:32 of the input RS1 before shifting.
-// We first check that it is the right node tree:
-//
-// (AND (SHL RS1, VC2), VC1)
-//
-// We check that VC2, the shamt is less than 32, otherwise the pattern is
-// exactly the same as SLLI and we give priority to that.
-// Eventually we check that that VC1, the mask used to clear the upper 32 bits
-// of RS1, is correct:
+// VC1 == maskLeadingOnes<uint64_t>(VC2)
 //
-// VC1 == (0xFFFFFFFF << VC2)
-
-bool RISCVDAGToDAGISel::SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  if (N.getOpcode() == ISD::AND && Subtarget->getXLenVT() == MVT::i64) {
-    SDValue And = N;
-    if (And.getOperand(0).getOpcode() == ISD::SHL) {
-      SDValue Shl = And.getOperand(0);
-      if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
-          isa<ConstantSDNode>(And.getOperand(1))) {
-        uint64_t VC1 = And.getConstantOperandVal(1);
-        uint64_t VC2 = Shl.getConstantOperandVal(1);
-        if (VC2 < 32 && VC1 == ((uint64_t)0xFFFFFFFF << VC2)) {
-          RS1 = Shl.getOperand(0);
-          Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                            Shl.getOperand(1).getValueType());
-          return true;
-        }
-      }
-    }
+bool RISCVDAGToDAGISel::MatchSROI(SDNode *N) const {
+  assert(N->getOpcode() == ISD::OR);
+  assert(N->getOperand(0).getOpcode() == ISD::SRL);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  SDValue Srl = N->getOperand(0);
+  if (Subtarget->is64Bit()) {
+    uint64_t VC1 = N->getConstantOperandVal(1);
+    uint64_t VC2 = Srl.getConstantOperandVal(1);
+    return VC1 == maskLeadingOnes<uint64_t>(VC2);
   }
-  return false;
+
+  uint32_t VC1 = N->getConstantOperandVal(1);
+  uint32_t VC2 = Srl.getConstantOperandVal(1);
+  return VC1 == maskLeadingOnes<uint32_t>(VC2);
 }
 
-// Check that it is a SLOIW (Shift Left Ones Immediate i32 on RV64).
-// We first check that it is the right node tree:
+// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64). A PatFrag
+// has already determined it has the right structure:
 //
-// (SIGN_EXTEND_INREG (OR (SHL RS1, VC2), VC1))
+// (OR (SRL RS1, VC2), VC1)
 //
 // and then we check that VC1, the mask used to fill with ones, is compatible
 // with VC2, the shamt:
 //
 // VC2 < 32
-// VC1 == maskTrailingOnes<uint64_t>(VC2)
-
-bool RISCVDAGToDAGISel::SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  assert(Subtarget->is64Bit() && "SLOIW should only be matched on RV64");
-  if (N.getOpcode() != ISD::SIGN_EXTEND_INREG ||
-      cast<VTSDNode>(N.getOperand(1))->getVT() != MVT::i32)
+// VC1 == maskTrailingZeros<uint64_t>(32 - VC2)
+//
+bool RISCVDAGToDAGISel::MatchSROIW(SDNode *N) const {
+  assert(N->getOpcode() == ISD::OR);
+  assert(N->getOperand(0).getOpcode() == ISD::SRL);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  // The IsRV64 predicate is checked after PatFrag predicates so we can get
+  // here even on RV32.
+  if (!Subtarget->is64Bit())
     return false;
 
-  SDValue Or = N.getOperand(0);
-
-  if (Or.getOpcode() != ISD::OR || !isa<ConstantSDNode>(Or.getOperand(1)))
-    return false;
-
-  SDValue Shl = Or.getOperand(0);
-  if (Shl.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(Shl.getOperand(1)))
-    return false;
-
-  uint64_t VC1 = Or.getConstantOperandVal(1);
-  uint64_t VC2 = Shl.getConstantOperandVal(1);
-
-  if (VC2 >= 32 || VC1 != maskTrailingOnes<uint64_t>(VC2))
-    return false;
+  SDValue Srl = N->getOperand(0);
+  uint64_t VC1 = N->getConstantOperandVal(1);
+  uint64_t VC2 = Srl.getConstantOperandVal(1);
 
-  RS1 = Shl.getOperand(0);
-  Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                    Shl.getOperand(1).getValueType());
-  return true;
+  // Immediate range should be enforced by uimm5 predicate.
+  assert(VC2 < 32 && "Unexpected immediate");
+  return VC1 == maskTrailingZeros<uint64_t>(32 - VC2);
 }
 
-// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
-// We first check that it is the right node tree:
+// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
+// on RV64).
+// SLLIUW is the same as SLLI except for the fact that it clears the bits
+// XLEN-1:32 of the input RS1 before shifting.
+// A PatFrag has already checked that it has the right structure:
 //
-// (OR (SRL RS1, VC2), VC1)
+// (AND (SHL RS1, VC2), VC1)
 //
-// and then we check that VC1, the mask used to fill with ones, is compatible
-// with VC2, the shamt:
+// We check that VC2, the shamt is less than 32, otherwise the pattern is
+// exactly the same as SLLI and we give priority to that.
+// Eventually we check that VC1, the mask used to clear the upper 32 bits
+// of RS1, is correct:
 //
-// VC2 < 32
-// VC1 == maskTrailingZeros<uint64_t>(32 - VC2)
+// VC1 == (0xFFFFFFFF << VC2)
 //
-bool RISCVDAGToDAGISel::SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  assert(Subtarget->is64Bit() && "SROIW should only be matched on RV64");
-  if (N.getOpcode() != ISD::OR || !isa<ConstantSDNode>(N.getOperand(1)))
-    return false;
-
-  SDValue Srl = N.getOperand(0);
-  if (Srl.getOpcode() != ISD::SRL || !isa<ConstantSDNode>(Srl.getOperand(1)))
+bool RISCVDAGToDAGISel::MatchSLLIUW(SDNode *N) const {
+  assert(N->getOpcode() == ISD::AND);
+  assert(N->getOperand(0).getOpcode() == ISD::SHL);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  // The IsRV64 predicate is checked after PatFrag predicates so we can get
+  // here even on RV32.
+  if (!Subtarget->is64Bit())
     return false;
 
-  uint64_t VC1 = N.getConstantOperandVal(1);
-  uint64_t VC2 = Srl.getConstantOperandVal(1);
+  SDValue Shl = N->getOperand(0);
+  uint64_t VC1 = N->getConstantOperandVal(1);
+  uint64_t VC2 = Shl.getConstantOperandVal(1);
 
-  if (VC2 >= 32 || VC1 != maskTrailingZeros<uint64_t>(32 - VC2))
-    return false;
-
-  RS1 = Srl.getOperand(0);
-  Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                    Srl.getOperand(1).getValueType());
-  return true;
+  // Immediate range should be enforced by uimm5 predicate.
+  assert(VC2 < 32 && "Unexpected immediate");
+  return VC1 == ((uint64_t)0xFFFFFFFF << VC2);
 }
 
 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -850,6 +850,11 @@
                        [(and node:$src, 0xffffffff),
                         (assertzexti32 node:$src)]>;
 
+def SRLIWPat : PatFrag<(ops node:$A, node:$B),
+                       (srl (and node:$A, imm), node:$B), [{
+  return MatchSRLIW(N);
+}]>;
+
 /// Immediates
 
 def : Pat<(simm12:$imm), (ADDI X0, simm12:$imm)>;
@@ -1168,8 +1173,8 @@
           (SUBW GPR:$rs1, GPR:$rs2)>;
 def : Pat<(sext_inreg (shl GPR:$rs1, uimm5:$shamt), i32),
           (SLLIW GPR:$rs1, uimm5:$shamt)>;
-// (srl (zexti32 ...), uimm5:$shamt) is matched with custom code due to the
-// need to undo manipulation of the mask value performed by DAGCombine.
+def : Pat<(SRLIWPat GPR:$rs1, uimm5:$shamt),
+          (SRLIW GPR:$rs1, uimm5:$shamt)>;
 def : Pat<(srl (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt),
           (SRLIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>;
 def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -59,6 +59,31 @@
                                    N->getValueType(0));
 }]>;
 
+// Check that it is a SLOI (Shift Left Ones Immediate).
+def SLOIPat : PatFrag<(ops node:$A, node:$B),
+                      (or (shl node:$A, node:$B), imm), [{
+  return MatchSLOI(N);
+}]>;
+
+// Check that it is a SROI (Shift Right Ones Immediate).
+def SROIPat : PatFrag<(ops node:$A, node:$B),
+                      (or (srl node:$A, node:$B), imm), [{
+  return MatchSROI(N);
+}]>;
+
+// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
+def SROIWPat : PatFrag<(ops node:$A, node:$B),
+                       (or (srl node:$A, node:$B), imm), [{
+  return MatchSROIW(N);
+}]>;
+
+// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
+// on RV64).
+def SLLIUWPat : PatFrag<(ops node:$A, node:$B),
+                        (and (shl node:$A, node:$B), imm), [{
+  return MatchSLLIUW(N);
+}]>;
+
 // Checks if this mask has a single 0 bit and cannot be used with ANDI.
 def SBCLRMask : ImmLeaf<XLenVT, [{
   if (Subtarget->is64Bit())
@@ -691,11 +716,6 @@
 //===----------------------------------------------------------------------===//
 // Codegen patterns
 //===----------------------------------------------------------------------===//
-def SLOIPat : ComplexPattern<XLenVT, 2, "SelectSLOI">;
-def SROIPat : ComplexPattern<XLenVT, 2, "SelectSROI">;
-def SLLIUWPat : ComplexPattern<i64, 2, "SelectSLLIUW">;
-def SLOIWPat : ComplexPattern<i64, 2, "SelectSLOIW">;
-def SROIWPat : ComplexPattern<i64, 2, "SelectSROIW">;
 
 let Predicates = [HasStdExtZbbOrZbp] in {
 def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
@@ -900,8 +920,8 @@
 let Predicates = [HasStdExtZbb, IsRV64] in {
 def : Pat<(and (add GPR:$rs, simm12:$simm12), (i64 0xFFFFFFFF)),
           (ADDIWU GPR:$rs, simm12:$simm12)>;
-def : Pat<(SLLIUWPat GPR:$rs1, uimmlog2xlen:$shamt),
-          (SLLIUW GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : Pat<(SLLIUWPat GPR:$rs1, uimm5:$shamt),
+          (SLLIUW GPR:$rs1, uimm5:$shamt)>;
 def : Pat<(and (add GPR:$rs1, GPR:$rs2), (i64 0xFFFFFFFF)),
           (ADDWU GPR:$rs1, GPR:$rs2)>;
 def : Pat<(and (sub GPR:$rs1, GPR:$rs2), (i64 0xFFFFFFFF)),
@@ -956,10 +976,10 @@
 } // Predicates = [HasStdExtZbs, IsRV64]
 
 let Predicates = [HasStdExtZbb, IsRV64] in {
-def : Pat<(SLOIWPat GPR:$rs1, uimmlog2xlen:$shamt),
-          (SLOIW GPR:$rs1, uimmlog2xlen:$shamt)>;
-def : Pat<(SROIWPat GPR:$rs1, uimmlog2xlen:$shamt),
-          (SROIW GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : Pat<(sext_inreg (SLOIPat GPR:$rs1, uimm5:$shamt), i32),
+          (SLOIW GPR:$rs1, uimm5:$shamt)>;
+def : Pat<(SROIWPat GPR:$rs1, uimm5:$shamt),
+          (SROIW GPR:$rs1, uimm5:$shamt)>;
 } // Predicates = [HasStdExtZbb, IsRV64]
 
 let Predicates = [HasStdExtZbp, IsRV64] in {
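Note (not part of the patch): the following is a small, self-contained C++ sketch of the immediate/mask identities that the new MatchSRLIW/MatchSLOI/MatchSROI/MatchSROIW/MatchSLLIUW predicates check. The helpers below are local stand-ins that mirror the semantics of llvm::maskTrailingOnes, llvm::maskLeadingOnes and llvm::maskTrailingZeros, and the shift amount 5 is just an arbitrary example value.

// Standalone illustration of the immediate checks performed by the Match*
// predicates in the patch. Compile with any C++11 compiler; no LLVM headers.
#include <cassert>
#include <cstdint>

static constexpr uint64_t maskTrailingOnes64(unsigned N) {
  // N low bits set, like llvm::maskTrailingOnes<uint64_t>(N).
  return N == 0 ? 0 : (~0ULL >> (64 - N));
}
static constexpr uint64_t maskLeadingOnes64(unsigned N) {
  // N high bits set, like llvm::maskLeadingOnes<uint64_t>(N).
  return N == 0 ? 0 : (~0ULL << (64 - N));
}
static constexpr uint64_t maskTrailingZeros64(unsigned N) {
  // N low bits clear, like llvm::maskTrailingZeros<uint64_t>(N).
  return ~maskTrailingOnes64(N);
}

int main() {
  const unsigned VC2 = 5; // example shamt
  // SLOI: (or (shl rs1, VC2), VC1) with VC1 == maskTrailingOnes(VC2).
  assert(maskTrailingOnes64(VC2) == 0x1f);
  // SROI: (or (srl rs1, VC2), VC1) with VC1 == maskLeadingOnes(VC2).
  assert(maskLeadingOnes64(VC2) == 0xf800000000000000ULL);
  // SROIW: VC1 == maskTrailingZeros(32 - VC2), i.e. ones from bit 32-VC2 up.
  assert(maskTrailingZeros64(32 - VC2) == 0xfffffffff8000000ULL);
  // SLLIUW: VC1 == (0xFFFFFFFF << VC2), the i32 zero-extension mask shifted.
  assert(((uint64_t)0xFFFFFFFF << VC2) == 0x1fffffffe0ULL);
  // SRLIW: (srl (and rs1, Mask), ShAmt) where Mask | maskTrailingOnes(ShAmt)
  // covers all 32 low bits, e.g. Mask == 0xffffffe0 for ShAmt == 5.
  assert((0xffffffe0 | maskTrailingOnes64(VC2)) == 0xffffffff);
  return 0;
}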