diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -37,6 +37,9 @@
 def SDT_RISCVIntUnaryOpW : SDTypeProfile<1, 1, [
   SDTCisSameAs<0, 1>, SDTCisVT<0, i64>
 ]>;
+def SDT_RISCVIntBinOp : SDTypeProfile<1, 2, [
+  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, XLenVT>
+]>;
 def SDT_RISCVIntBinOpW : SDTypeProfile<1, 2, [
   SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>
 ]>;
@@ -836,13 +839,13 @@
 /// Generic pattern classes

 class PatGpr<SDPatternOperator OpNode, RVInst Inst>
-    : Pat<(OpNode GPR:$rs1), (Inst GPR:$rs1)>;
+    : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1))), (Inst GPR:$rs1)>;
 class PatGprGpr<SDPatternOperator OpNode, RVInst Inst>
-    : Pat<(OpNode GPR:$rs1, GPR:$rs2), (Inst GPR:$rs1, GPR:$rs2)>;
+    : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))), (Inst GPR:$rs1, GPR:$rs2)>;
 class PatGprSimm12<SDPatternOperator OpNode, RVInstI Inst>
-    : Pat<(OpNode GPR:$rs1, simm12:$imm12), (Inst GPR:$rs1, simm12:$imm12)>;
+    : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1), simm12:$imm12)), (Inst GPR:$rs1, simm12:$imm12)>;
 class PatGprUimmLog2XLen<SDPatternOperator OpNode, RVInstIShift Inst>
-    : Pat<(OpNode GPR:$rs1, uimmlog2xlen:$shamt),
+    : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1), uimmlog2xlen:$shamt)),
           (Inst GPR:$rs1, uimmlog2xlen:$shamt)>;

 /// Predicates
@@ -893,7 +896,7 @@
               (operator node:$val, (XLenVT (shiftMaskXLen node:$count)))>;
 class shiftopw<SDPatternOperator operator>
     : PatFrag<(ops node:$val, node:$count),
-              (operator node:$val, (i64 (shiftMask32 node:$count)))>;
+              (operator node:$val, (i64 (shiftMask32 (i64 node:$count))))>;

 def : PatGprGpr<shiftop<shl>, SLL>;
 def : PatGprGpr<shiftop<srl>, SRL>;
@@ -924,36 +927,45 @@
 // Define pattern expansions for setcc operations that aren't directly
 // handled by a RISC-V instruction.
-def : Pat<(seteq GPR:$rs1, 0), (SLTIU GPR:$rs1, 1)>;
-def : Pat<(seteq GPR:$rs1, GPR:$rs2), (SLTIU (XOR GPR:$rs1, GPR:$rs2), 1)>;
-def : Pat<(seteq GPR:$rs1, simm12_plus1:$imm12),
+def : Pat<(XLenVT (seteq (XLenVT GPR:$rs1), 0)), (SLTIU GPR:$rs1, 1)>;
+def : Pat<(XLenVT (seteq (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (SLTIU (XOR GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(XLenVT (seteq (XLenVT GPR:$rs1), simm12_plus1:$imm12)),
           (SLTIU (ADDI GPR:$rs1, (NegImm simm12_plus1:$imm12)), 1)>;
-def : Pat<(setne GPR:$rs1, 0), (SLTU X0, GPR:$rs1)>;
-def : Pat<(setne GPR:$rs1, GPR:$rs2), (SLTU X0, (XOR GPR:$rs1, GPR:$rs2))>;
-def : Pat<(setne GPR:$rs1, simm12_plus1:$imm12),
-          (SLTU X0, (ADDI GPR:$rs1, (NegImm simm12_plus1:$imm12)))>;
-def : Pat<(setugt GPR:$rs1, GPR:$rs2), (SLTU GPR:$rs2, GPR:$rs1)>;
-def : Pat<(setuge GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>;
-def : Pat<(setule GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs2, GPR:$rs1), 1)>;
-def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
-def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
-def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
+def : Pat<(XLenVT (setne (XLenVT GPR:$rs1), 0)), (SLTU (XLenVT X0), GPR:$rs1)>;
+def : Pat<(XLenVT (setne (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (SLTU (XLenVT X0), (XOR GPR:$rs1, GPR:$rs2))>;
+def : Pat<(XLenVT (setne (XLenVT GPR:$rs1), simm12_plus1:$imm12)),
+          (SLTU (XLenVT X0), (ADDI GPR:$rs1, (NegImm simm12_plus1:$imm12)))>;
+def : Pat<(XLenVT (setugt (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (SLTU GPR:$rs2, GPR:$rs1)>;
+def : Pat<(XLenVT (setuge (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(XLenVT (setule (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (XORI (SLTU GPR:$rs2, GPR:$rs1), 1)>;
+def : Pat<(XLenVT (setgt (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (SLT GPR:$rs2, GPR:$rs1)>;
+def : Pat<(XLenVT (setge (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(XLenVT (setle (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;

 let usesCustomInserter = 1 in
-class SelectCC_rrirr<RegisterClass valty, RegisterClass cmpty>
+class SelectCC_rrirr<RegisterClass valty, RegisterClass cmpty,
+                     ValueType cmpvt, ValueType valvt>
     : Pseudo<(outs valty:$dst),
              (ins cmpty:$lhs, cmpty:$rhs, ixlenimm:$imm,
               valty:$truev, valty:$falsev),
-             [(set valty:$dst, (riscv_selectcc cmpty:$lhs, cmpty:$rhs,
-              (XLenVT imm:$imm), valty:$truev, valty:$falsev))]>;
+             [(set valty:$dst, (riscv_selectcc (cmpvt cmpty:$lhs), (cmpvt cmpty:$rhs),
+              (XLenVT imm:$imm), (valvt valty:$truev), (valvt valty:$falsev)))]>;

-def Select_GPR_Using_CC_GPR : SelectCC_rrirr<GPR, GPR>;
+def Select_GPR_Using_CC_GPR : SelectCC_rrirr<GPR, GPR, XLenVT, XLenVT>;

 /// Branches and jumps

 // Match `riscv_brcc` and lower to the appropriate RISC-V branch instruction.
 class BccPat<CondCode Cond, RVInstB Inst>
-    : Pat<(riscv_brcc GPR:$rs1, GPR:$rs2, Cond, bb:$imm12),
+    : Pat<(riscv_brcc (XLenVT GPR:$rs1), (XLenVT GPR:$rs2), Cond, bb:$imm12),
           (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>;

 def : BccPat<SETEQ, BEQ>;
@@ -1000,9 +1012,9 @@
 def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
 def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;

-def : Pat<(riscv_uret_flag), (URET X0, X0)>;
-def : Pat<(riscv_sret_flag), (SRET X0, X0)>;
-def : Pat<(riscv_mret_flag), (MRET X0, X0)>;
+def : Pat<(riscv_uret_flag), (URET (XLenVT X0), (XLenVT X0))>;
+def : Pat<(riscv_sret_flag), (SRET (XLenVT X0), (XLenVT X0))>;
+def : Pat<(riscv_mret_flag), (MRET (XLenVT X0), (XLenVT X0))>;

 let isCall = 1, Defs = [X1] in
 def PseudoCALLIndirect : Pseudo<(outs), (ins GPR:$rs1),
@@ -1253,7 +1265,7 @@
 /// readcyclecounter
 // On RV64, we can directly read the 64-bit "cycle" CSR.
 let Predicates = [IsRV64] in
-def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, X0)>;
+def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, (XLenVT X0))>;
 // On RV32, ReadCycleWide will be expanded to the suggested loop reading both
 // halves of the 64-bit "cycle" CSR.
 let Predicates = [IsRV32], usesCustomInserter = 1, hasNoSchedulingInfo = 1 in
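Note on the recurring idiom in this patch: once GPR advertises more than one
value type (see the RISCVRegisterInfo.td hunk at the end of this diff), TableGen
can no longer infer a unique type for a bare GPR:$rs1 leaf, so every scalar
pattern pins its type with an explicit XLenVT cast. A minimal sketch of the
before/after shape, using an illustrative ADD pattern that is not itself part of
this patch:

  // Ambiguous once GPR carries {XLenVT, v4i8, ...}: which type is $rs1?
  def : Pat<(add GPR:$rs1, GPR:$rs2), (ADD GPR:$rs1, GPR:$rs2)>;
  // Unambiguous: result and operands are pinned to the scalar XLen type.
  def : Pat<(XLenVT (add (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
            (ADD GPR:$rs1, GPR:$rs2)>;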
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -167,16 +167,16 @@
 defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
 defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;

-def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
-          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
-          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
-          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
-          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
-          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr)),
+          (AMOADD_W GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_acquire GPR:$addr, GPR:$incr)),
+          (AMOADD_W_AQ GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_release GPR:$addr, GPR:$incr)),
+          (AMOADD_W_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr)),
+          (AMOADD_W_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr)),
+          (AMOADD_W_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;

 /// Pseudo AMOs

@@ -191,15 +191,15 @@
 def PseudoAtomicLoadNand32 : PseudoAMO;
 // Ordering constants must be kept in sync with the AtomicOrdering enum in
 // AtomicOrdering.h.
-def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
-def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_acquire GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
-def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_release GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
-def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
-def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;

 class PseudoMaskedAMO
@@ -281,15 +281,15 @@
 // Ordering constants must be kept in sync with the AtomicOrdering enum in
 // AtomicOrdering.h.
 multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
-  def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
+  def : Pat<(XLenVT (!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
             (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
-  def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
+  def : Pat<(XLenVT (!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
-  def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
+  def : Pat<(XLenVT (!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
-  def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
+  def : Pat<(XLenVT (!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
-  def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
+  def : Pat<(XLenVT (!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
 }

@@ -335,15 +335,15 @@
 /// 64-bit AMOs

 def : Pat<(i64 (atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr)),
-          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_acquire GPR:$addr, GPR:$incr)),
-          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_AQ GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_release GPR:$addr, GPR:$incr)),
-          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr)),
-          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr)),
-          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;

 /// 64-bit pseudo AMOs
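The AMO patterns above hard-code their ordering immediates; the values come from
llvm::AtomicOrdering in AtomicOrdering.h (Monotonic=2, Acquire=4, Release=5,
AcquireRelease=6, SequentiallyConsistent=7). A hedged sketch of the repeated
shape, using an illustrative multiclass name that does not exist in this patch:

  multiclass IllustrativeAMOOrderPat<string Op, string InstBase> {
    // 2 = Monotonic, 7 = SequentiallyConsistent, per AtomicOrdering.h.
    def : Pat<(XLenVT (!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$incr)),
              (!cast<Instruction>(InstBase) GPR:$addr, GPR:$incr, 2)>;
    def : Pat<(XLenVT (!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$incr)),
              (!cast<Instruction>(InstBase) GPR:$addr, GPR:$incr, 7)>;
  }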
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -25,11 +25,11 @@
 def riscv_fsrw : SDNode<"RISCVISD::FSRW", SDT_RISCVIntShiftDOpW>;
 def riscv_fsl : SDNode<"RISCVISD::FSL", SDTIntShiftDOp>;
 def riscv_fsr : SDNode<"RISCVISD::FSR", SDTIntShiftDOp>;
-def riscv_grevi : SDNode<"RISCVISD::GREVI", SDTIntBinOp>;
+def riscv_grevi : SDNode<"RISCVISD::GREVI", SDT_RISCVIntBinOp>;
 def riscv_greviw : SDNode<"RISCVISD::GREVIW", SDT_RISCVIntBinOpW>;
-def riscv_gorci : SDNode<"RISCVISD::GORCI", SDTIntBinOp>;
+def riscv_gorci : SDNode<"RISCVISD::GORCI", SDT_RISCVIntBinOp>;
 def riscv_gorciw : SDNode<"RISCVISD::GORCIW", SDT_RISCVIntBinOpW>;
-def riscv_shfli : SDNode<"RISCVISD::SHFLI", SDTIntBinOp>;
+def riscv_shfli : SDNode<"RISCVISD::SHFLI", SDT_RISCVIntBinOp>;

 def UImmLog2XLenHalfAsmOperand : AsmOperandClass {
   let Name = "UImmLog2XLenHalf";
@@ -674,29 +674,29 @@
//===----------------------------------------------------------------------===//

 let Predicates = [HasStdExtZbbOrZbp] in {
-def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
-def : Pat<(or GPR:$rs1, (not GPR:$rs2)), (ORN GPR:$rs1, GPR:$rs2)>;
-def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (or GPR:$rs1, (not GPR:$rs2))), (ORN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbp]

 let Predicates = [HasStdExtZbbOrZbp] in {
-def : Pat<(rotl GPR:$rs1, GPR:$rs2), (ROL GPR:$rs1, GPR:$rs2)>;
-def : Pat<(rotr GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (rotl GPR:$rs1, (XLenVT GPR:$rs2))), (ROL GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (rotr GPR:$rs1, (XLenVT GPR:$rs2))), (ROR GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbp]

 let Predicates = [HasStdExtZbs] in {
-def : Pat<(and (not (shiftop<shl> 1, GPR:$rs2)), GPR:$rs1),
+def : Pat<(XLenVT (and (not (shiftop<shl> 1, (XLenVT GPR:$rs2))), GPR:$rs1)),
           (BCLR GPR:$rs1, GPR:$rs2)>;
-def : Pat<(and (rotl -2, GPR:$rs2), GPR:$rs1), (BCLR GPR:$rs1, GPR:$rs2)>;
-def : Pat<(or (shiftop<shl> 1, GPR:$rs2), GPR:$rs1),
+def : Pat<(XLenVT (and (rotl -2, (XLenVT GPR:$rs2)), GPR:$rs1)), (BCLR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (or (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
           (BSET GPR:$rs1, GPR:$rs2)>;
-def : Pat<(xor (shiftop<shl> 1, GPR:$rs2), GPR:$rs1),
+def : Pat<(XLenVT (xor (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
           (BINV GPR:$rs1, GPR:$rs2)>;
-def : Pat<(and (shiftop<srl> GPR:$rs1, GPR:$rs2), 1),
+def : Pat<(XLenVT (and (shiftop<srl> GPR:$rs1, (XLenVT GPR:$rs2)), 1)),
           (BEXT GPR:$rs1, GPR:$rs2)>;
-def : Pat<(shiftop<shl> 1, GPR:$rs2),
-          (BSET X0, GPR:$rs2)>;
+def : Pat<(XLenVT (shiftop<shl> 1, (XLenVT GPR:$rs2))),
+          (BSET (XLenVT X0), GPR:$rs2)>;

 def : Pat<(and GPR:$rs1, BCLRMask:$mask),
           (BCLRI GPR:$rs1, BCLRMask:$mask)>;
@@ -712,9 +712,9 @@
 // There's no encoding for roli in the 'B' extension as it can be
 // implemented with rori by negating the immediate.
 let Predicates = [HasStdExtZbbOrZbp] in {
-def : Pat<(rotr GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(XLenVT (rotr GPR:$rs1, uimmlog2xlen:$shamt)),
           (RORI GPR:$rs1, uimmlog2xlen:$shamt)>;
-def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(XLenVT (rotl GPR:$rs1, uimmlog2xlen:$shamt)),
           (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;

 // We treat orc.b as a separate instruction, so match it directly. We also
@@ -742,30 +742,30 @@
 } // Predicates = [HasStdExtZbp, IsRV64]

 let Predicates = [HasStdExtZbt] in {
-def : Pat<(or (and (not GPR:$rs2), GPR:$rs3), (and GPR:$rs2, GPR:$rs1)),
+def : Pat<(XLenVT (or (and (not GPR:$rs2), GPR:$rs3), (and GPR:$rs2, GPR:$rs1))),
           (CMIX GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(select (XLenVT (setne GPR:$rs2, 0)), GPR:$rs1, GPR:$rs3),
+def : Pat<(XLenVT (select (XLenVT (setne (XLenVT GPR:$rs2), 0)), GPR:$rs1, GPR:$rs3)),
           (CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(select (XLenVT (seteq GPR:$rs2, 0)), GPR:$rs3, GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT (seteq (XLenVT GPR:$rs2), 0)), GPR:$rs3, GPR:$rs1)),
           (CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(select (XLenVT (setne GPR:$x, simm12_plus1:$y)), GPR:$rs1, GPR:$rs3),
+def : Pat<(XLenVT (select (XLenVT (setne (XLenVT GPR:$x), simm12_plus1:$y)), GPR:$rs1, GPR:$rs3)),
           (CMOV GPR:$rs1, (ADDI GPR:$x, (NegImm simm12_plus1:$y)), GPR:$rs3)>;
-def : Pat<(select (XLenVT (seteq GPR:$x, simm12_plus1:$y)), GPR:$rs3, GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT (seteq (XLenVT GPR:$x), simm12_plus1:$y)), GPR:$rs3, GPR:$rs1)),
           (CMOV GPR:$rs1, (ADDI GPR:$x, (NegImm simm12_plus1:$y)), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setne GPR:$x, GPR:$y)), GPR:$rs1, GPR:$rs3),
+def : Pat<(XLenVT (select (XLenVT (setne (XLenVT GPR:$x), GPR:$y)), GPR:$rs1, GPR:$rs3)),
           (CMOV GPR:$rs1, (XOR GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select (XLenVT (seteq GPR:$x, GPR:$y)), GPR:$rs3, GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT (seteq (XLenVT GPR:$x), GPR:$y)), GPR:$rs3, GPR:$rs1)),
           (CMOV GPR:$rs1, (XOR GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setuge GPR:$x, GPR:$y)), GPR:$rs3, GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT (setuge (XLenVT GPR:$x), GPR:$y)), GPR:$rs3, GPR:$rs1)),
           (CMOV GPR:$rs1, (SLTU GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setule GPR:$y, GPR:$x)), GPR:$rs3, GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT (setule (XLenVT GPR:$y), GPR:$x)), GPR:$rs3, GPR:$rs1)),
           (CMOV GPR:$rs1, (SLTU GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setge GPR:$x, GPR:$y)), GPR:$rs3, GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT (setge (XLenVT GPR:$x), GPR:$y)), GPR:$rs3, GPR:$rs1)),
           (CMOV GPR:$rs1, (SLT GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setle GPR:$y, GPR:$x)), GPR:$rs3, GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT (setle (XLenVT GPR:$y), GPR:$x)), GPR:$rs3, GPR:$rs1)),
           (CMOV GPR:$rs1, (SLT GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select GPR:$rs2, GPR:$rs1, GPR:$rs3),
+def : Pat<(XLenVT (select (XLenVT GPR:$rs2), GPR:$rs1, GPR:$rs3)),
           (CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
 } // Predicates = [HasStdExtZbt]

@@ -774,23 +774,23 @@
 // shift of zero, fshr will return its second operand. fsl and fsr both return
 // $rs1 so the patterns need to have different operand orders.
 let Predicates = [HasStdExtZbt] in {
-def : Pat<(riscv_fsl GPR:$rs1, GPR:$rs3, GPR:$rs2),
+def : Pat<(XLenVT (riscv_fsl GPR:$rs1, GPR:$rs3, (XLenVT GPR:$rs2))),
           (FSL GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(riscv_fsr GPR:$rs3, GPR:$rs1, GPR:$rs2),
+def : Pat<(XLenVT (riscv_fsr GPR:$rs3, GPR:$rs1, (XLenVT GPR:$rs2))),
           (FSR GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(fshr GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(XLenVT (fshr GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt)),
           (FSRI GPR:$rs1, GPR:$rs3, uimmlog2xlen:$shamt)>;
 // We can use FSRI for fshl by immediate if we subtract the immediate from
 // XLen and swap the operands.
-def : Pat<(fshl GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(XLenVT (fshl GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt)),
           (FSRI GPR:$rs1, GPR:$rs3, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
 } // Predicates = [HasStdExtZbt]

 let Predicates = [HasStdExtZbb] in {
-def : Pat<(ctlz GPR:$rs1), (CLZ GPR:$rs1)>;
-def : Pat<(cttz GPR:$rs1), (CTZ GPR:$rs1)>;
-def : Pat<(ctpop GPR:$rs1), (CPOP GPR:$rs1)>;
+def : Pat<(XLenVT (ctlz (XLenVT GPR:$rs1))), (CLZ GPR:$rs1)>;
+def : Pat<(XLenVT (cttz (XLenVT GPR:$rs1))), (CTZ GPR:$rs1)>;
+def : Pat<(XLenVT (ctpop (XLenVT GPR:$rs1))), (CPOP GPR:$rs1)>;
 } // Predicates = [HasStdExtZbb]

 let Predicates = [HasStdExtZbb] in {
@@ -799,10 +799,10 @@
 }

 let Predicates = [HasStdExtZbb] in {
-def : Pat<(smin GPR:$rs1, GPR:$rs2), (MIN GPR:$rs1, GPR:$rs2)>;
-def : Pat<(smax GPR:$rs1, GPR:$rs2), (MAX GPR:$rs1, GPR:$rs2)>;
-def : Pat<(umin GPR:$rs1, GPR:$rs2), (MINU GPR:$rs1, GPR:$rs2)>;
-def : Pat<(umax GPR:$rs1, GPR:$rs2), (MAXU GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (smin GPR:$rs1, GPR:$rs2)), (MIN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (smax GPR:$rs1, GPR:$rs2)), (MAX GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (umin GPR:$rs1, GPR:$rs2)), (MINU GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (umax GPR:$rs1, GPR:$rs2)), (MAXU GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbb]

 let Predicates = [HasStdExtZbb, IsRV32] in {
@@ -827,7 +827,7 @@
           (PACKU GPR:$rs1, GPR:$rs2)>;
 }
 let Predicates = [HasStdExtZbp] in
-def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
+def : Pat<(or (and (shl (XLenVT GPR:$rs2), (XLenVT 8)), 0xFFFF),
               (and GPR:$rs1, 0x00FF)),
           (PACKH GPR:$rs1, GPR:$rs2)>;

@@ -838,11 +838,11 @@
 }

 let Predicates = [HasStdExtZba] in {
-def : Pat<(add (shl GPR:$rs1, (XLenVT 1)), GPR:$rs2),
+def : Pat<(XLenVT (add (shl GPR:$rs1, (XLenVT 1)), GPR:$rs2)),
           (SH1ADD GPR:$rs1, GPR:$rs2)>;
-def : Pat<(add (shl GPR:$rs1, (XLenVT 2)), GPR:$rs2),
+def : Pat<(XLenVT (add (shl GPR:$rs1, (XLenVT 2)), GPR:$rs2)),
           (SH2ADD GPR:$rs1, GPR:$rs2)>;
-def : Pat<(add (shl GPR:$rs1, (XLenVT 3)), GPR:$rs2),
+def : Pat<(XLenVT (add (shl GPR:$rs1, (XLenVT 3)), GPR:$rs2)),
           (SH3ADD GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZba]

@@ -853,7 +853,7 @@
           (SLLIUW GPR:$rs1, uimm5:$shamt)>;
 def : Pat<(i64 (add (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
           (ADDUW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADDUW GPR:$rs, X0)>;
+def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADDUW GPR:$rs, (i64 X0))>;

 def : Pat<(i64 (add (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 1)), GPR:$rs2)),
           (SH1ADDUW GPR:$rs1, GPR:$rs2)>;
@@ -902,7 +902,7 @@
 let Predicates = [HasStdExtZbb, IsRV64] in {
 def : Pat<(riscv_clzw GPR:$rs1), (CLZW GPR:$rs1)>;
 def : Pat<(riscv_ctzw GPR:$rs1), (CTZW GPR:$rs1)>;
-def : Pat<(i64 (ctpop (and GPR:$rs1, 0xFFFFFFFF))), (CPOPW GPR:$rs1)>;
+def : Pat<(i64 (ctpop (and (i64 GPR:$rs1), 0xFFFFFFFF))), (CPOPW GPR:$rs1)>;
 } // Predicates = [HasStdExtZbb, IsRV64]

 let Predicates = [HasStdExtZbp, IsRV64] in {
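The Zbs patterns above lean on the parameterized shiftop fragment whose template
arguments were restored here as shiftop<shl>/shiftop<srl>; treat the exact
instantiations as a reconstruction to verify against the tree. For reference,
the fragment's shape (mirroring the shiftop/shiftopw context lines in the
RISCVInstrInfo.td hunk earlier in this diff):

  // shiftop<shl> matches (shl $val, $count) where $count may carry a
  // redundant (and $count, XLen-1) that shiftMaskXLen strips, so one
  // single-bit pattern covers both masked and unmasked shift amounts.
  class shiftop<SDPatternOperator operator>
      : PatFrag<(ops node:$val, node:$count),
                (operator node:$val, (XLenVT (shiftMaskXLen node:$count)))>;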
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -230,8 +230,8 @@
 // Pseudo-instructions and codegen patterns
 //===----------------------------------------------------------------------===//

-class PatFpr64Fpr64<SDPatternOperator OpNode, RVInstR Inst>
-    : Pat<(OpNode FPR64:$rs1, FPR64:$rs2), (Inst $rs1, $rs2)>;
+class PatFpr64Fpr64<SDPatternOperator OpNode, RVInstR Inst, ValueType vt = XLenVT>
+    : Pat<(vt (OpNode FPR64:$rs1, FPR64:$rs2)), (Inst $rs1, $rs2)>;

 class PatFpr64Fpr64DynFrm<SDPatternOperator OpNode, RVInstRFrm Inst>
     : Pat<(OpNode FPR64:$rs1, FPR64:$rs2), (Inst $rs1, $rs2, 0b111)>;
@@ -259,7 +259,7 @@
 def : Pat<(fneg FPR64:$rs1), (FSGNJN_D $rs1, $rs1)>;
 def : Pat<(fabs FPR64:$rs1), (FSGNJX_D $rs1, $rs1)>;

-def : PatFpr64Fpr64<fcopysign, FSGNJ_D>;
+def : PatFpr64Fpr64<fcopysign, FSGNJ_D, f64>;
 def : Pat<(fcopysign FPR64:$rs1, (fneg FPR64:$rs2)), (FSGNJN_D $rs1, $rs2)>;
 def : Pat<(fcopysign FPR64:$rs1, FPR32:$rs2), (FSGNJ_D $rs1, (FCVT_D_S $rs2))>;
 def : Pat<(fcopysign FPR32:$rs1, FPR64:$rs2), (FSGNJ_S $rs1, (FCVT_S_D $rs2,
@@ -287,8 +287,8 @@
 // draft 2.3 ISA spec changes the definition of fmin and fmax in a way that
 // matches LLVM's fminnum and fmaxnum
 // <https://github.com/riscv/riscv-isa-manual/commit/cd20cee7efd9bacf29d9ad13c9b16ce8c61e50be>.
-def : PatFpr64Fpr64<fminnum, FMIN_D>;
-def : PatFpr64Fpr64<fmaxnum, FMAX_D>;
+def : PatFpr64Fpr64<fminnum, FMIN_D, f64>;
+def : PatFpr64Fpr64<fmaxnum, FMAX_D, f64>;

 /// Setcc

@@ -299,7 +299,7 @@
 def : PatFpr64Fpr64<setle, FLE_D>;
 def : PatFpr64Fpr64<setole, FLE_D>;

-def Select_FPR64_Using_CC_GPR : SelectCC_rrirr<FPR64, GPR>;
+def Select_FPR64_Using_CC_GPR : SelectCC_rrirr<FPR64, GPR, XLenVT, f64>;

 /// Loads
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -290,8 +290,8 @@
 def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;

 /// Generic pattern classes
-class PatFpr32Fpr32<SDPatternOperator OpNode, RVInstR Inst>
-    : Pat<(OpNode FPR32:$rs1, FPR32:$rs2), (Inst $rs1, $rs2)>;
+class PatFpr32Fpr32<SDPatternOperator OpNode, RVInstR Inst, ValueType vt = XLenVT>
+    : Pat<(vt (OpNode FPR32:$rs1, FPR32:$rs2)), (Inst $rs1, $rs2)>;

 class PatFpr32Fpr32DynFrm<SDPatternOperator OpNode, RVInstRFrm Inst>
     : Pat<(OpNode FPR32:$rs1, FPR32:$rs2), (Inst $rs1, $rs2, 0b111)>;
@@ -299,7 +299,7 @@
 let Predicates = [HasStdExtF] in {

 /// Float constants
-def : Pat<(f32 (fpimm0)), (FMV_W_X X0)>;
+def : Pat<(f32 (fpimm0)), (FMV_W_X (XLenVT X0))>;

 /// Float conversion operations

@@ -318,7 +318,7 @@
 def : Pat<(fneg FPR32:$rs1), (FSGNJN_S $rs1, $rs1)>;
 def : Pat<(fabs FPR32:$rs1), (FSGNJX_S $rs1, $rs1)>;

-def : PatFpr32Fpr32<fcopysign, FSGNJ_S>;
+def : PatFpr32Fpr32<fcopysign, FSGNJ_S, f32>;
 def : Pat<(fcopysign FPR32:$rs1, (fneg FPR32:$rs2)), (FSGNJN_S $rs1, $rs2)>;

 // fmadd: rs1 * rs2 + rs3
@@ -343,8 +343,8 @@
 // draft 2.3 ISA spec changes the definition of fmin and fmax in a way that
 // matches LLVM's fminnum and fmaxnum
 // <https://github.com/riscv/riscv-isa-manual/commit/cd20cee7efd9bacf29d9ad13c9b16ce8c61e50be>.
-def : PatFpr32Fpr32<fminnum, FMIN_S>;
-def : PatFpr32Fpr32<fmaxnum, FMAX_S>;
+def : PatFpr32Fpr32<fminnum, FMIN_S, f32>;
+def : PatFpr32Fpr32<fmaxnum, FMAX_S, f32>;

 /// Setcc

@@ -355,7 +355,7 @@
 def : PatFpr32Fpr32<setle, FLE_S>;
 def : PatFpr32Fpr32<setole, FLE_S>;

-def Select_FPR32_Using_CC_GPR : SelectCC_rrirr<FPR32, GPR>;
+def Select_FPR32_Using_CC_GPR : SelectCC_rrirr<FPR32, GPR, XLenVT, f32>;

 /// Loads
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
@@ -92,7 +92,7 @@
 // Although the sexti32 operands may not have originated from an i32 srem,
 // this pattern is safe as it is impossible for two sign extended inputs to
 // produce a result where res[63:32]=0 and res[31]=1.
-def : Pat<(srem (sexti32 (i64 GPR:$rs1)), (sexti32 (i64 GPR:$rs2))),
+def : Pat<(i64 (srem (sexti32 (i64 GPR:$rs1)), (sexti32 (i64 GPR:$rs2)))),
           (REMW GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtM, IsRV64]
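A note on the PatFpr*Fpr* reconstruction above: the surviving hunk context lost
the template parameter lists, so the `ValueType vt = XLenVT` default shown here
is inferred from the fact that the setcc users stay unchanged while the
FP-valued users gain an explicit type argument; treat it as an assumption to
verify against the real tree. The intended split, sketched:

  // Comparisons produce an XLen-typed boolean, so the default applies:
  def : PatFpr64Fpr64<seteq, FEQ_D>;         // (XLenVT (seteq f64, f64))
  // FP-valued ops must name their own result type explicitly:
  def : PatFpr64Fpr64<fminnum, FMIN_D, f64>; // (f64 (fminnum f64, f64))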
(!cast(inst # "_WD_" # vlmul.MX # "_" # emul.MX # "_MASK") $rs1, $vs2, $vd, (mask_type V0), GPR:$vl, sew)>; @@ -2329,11 +2329,11 @@ { foreach mti = AllMasks in { def : Pat<(XLenVT (!cast(intrinsic_name) - (mti.Mask VR:$rs1), (XLenVT (VLOp GPR:$vl)))), + (mti.Mask VR:$rs1), (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast(inst#"_M_"#mti.BX) $rs1, GPR:$vl, mti.SEW)>; def : Pat<(XLenVT (!cast(intrinsic_name # "_mask") - (mti.Mask VR:$rs1), (mti.Mask V0), (XLenVT (VLOp GPR:$vl)))), + (mti.Mask VR:$rs1), (mti.Mask V0), (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast(inst#"_M_"#mti.BX#"_MASK") $rs1, (mti.Mask V0), GPR:$vl, mti.SEW)>; } @@ -2400,12 +2400,12 @@ { foreach vti = AllIntegerVectors in { def : Pat<(vti.Vector (!cast(intrinsic) - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast(instruction#"_V_" # vti.LMul.MX) GPR:$vl, vti.SEW)>; def : Pat<(vti.Vector (!cast(intrinsic # "_mask") (vti.Vector vti.RegClass:$merge), - (vti.Mask V0), (XLenVT (VLOp GPR:$vl)))), + (vti.Mask V0), (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast(instruction#"_V_" # vti.LMul.MX # "_MASK") vti.RegClass:$merge, (vti.Mask V0), GPR:$vl, vti.SEW)>; @@ -2415,7 +2415,7 @@ multiclass VPatNullaryM { foreach mti = AllMasks in def : Pat<(mti.Mask (!cast(intrinsic) - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT (XLenVT GPR:$vl)))))), (!cast(inst#"_M_"#mti.BX) GPR:$vl, mti.SEW)>; } @@ -2454,7 +2454,7 @@ (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (mask_type V0), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast(inst#"_"#kind#"_"#vlmul.MX) (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), @@ -2475,7 +2475,7 @@ def : Pat<(result_type (!cast(intrinsic) (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast(inst#"_"#kind#"_"#vlmul.MX) (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), @@ -3248,7 +3248,7 @@ // consistency. def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, @@ -3257,7 +3257,7 @@ (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs1, @@ -3269,7 +3269,7 @@ // Match VSUB with a small immediate to vadd.vi by negating the immediate. 
def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1, (NegImm simm5_plus1:$rs2), GPR:$vl, @@ -3278,7 +3278,7 @@ (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), (vti.Mask V0), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVADD_VI_"#vti.LMul.MX#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs1, @@ -3885,7 +3885,7 @@ foreach vti = AllIntegerVectors in { def : Pat<(vti.Mask (int_riscv_vmsgt (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSLT_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, @@ -3894,7 +3894,7 @@ (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSLT_VV_"#vti.LMul.MX#"_MASK") VR:$merge, vti.RegClass:$rs1, @@ -3905,7 +3905,7 @@ def : Pat<(vti.Mask (int_riscv_vmsgtu (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSLTU_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, @@ -3914,7 +3914,7 @@ (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSLTU_VV_"#vti.LMul.MX#"_MASK") VR:$merge, vti.RegClass:$rs1, @@ -3931,7 +3931,7 @@ foreach vti = AllIntegerVectors in { def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1, (DecImm simm5_plus1:$rs2), GPR:$vl, @@ -3940,7 +3940,7 @@ (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), (vti.Mask V0), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK") VR:$merge, vti.RegClass:$rs1, @@ -3951,7 +3951,7 @@ def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1, (DecImm simm5_plus1:$rs2), GPR:$vl, @@ -3960,7 +3960,7 @@ (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), (vti.Mask V0), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK") VR:$merge, vti.RegClass:$rs1, @@ -3972,7 +3972,7 @@ // Special cases to avoid matching vmsltu.vi 0 (always false) to // vmsleu.vi -1 (always true). Instead match to vmsne.vv. 
def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1), - (vti.Scalar 0), (XLenVT (VLOp GPR:$vl)))), + (vti.Scalar 0), (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, @@ -3981,7 +3981,7 @@ (vti.Vector vti.RegClass:$rs1), (vti.Scalar 0), (vti.Mask V0), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK") VR:$merge, vti.RegClass:$rs1, @@ -4048,7 +4048,7 @@ //===----------------------------------------------------------------------===// foreach vti = AllVectors in { def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1), - (XLenVT (VLOp GPR:$vl)))), + (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVMV_V_V_"#vti.LMul.MX) $rs1, GPR:$vl, vti.SEW)>; @@ -4196,7 +4196,7 @@ defvar instr = !cast("PseudoVMERGE_VIM_"#fvti.LMul.MX); def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$rs2), (fvti.Scalar (fpimm0)), - (fvti.Mask V0), (XLenVT (VLOp GPR:$vl)))), + (fvti.Mask V0), (XLenVT (VLOp (XLenVT GPR:$vl))))), (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.SEW)>; } @@ -4357,7 +4357,7 @@ (instr $rs2, fvti.SEW)>; def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1), - (fvti.Scalar fvti.ScalarRegClass:$rs2), (XLenVT (VLOp GPR:$vl)))), + (fvti.Scalar fvti.ScalarRegClass:$rs2), (XLenVT (VLOp (XLenVT GPR:$vl))))), (!cast("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" # fvti.LMul.MX) (fvti.Vector $rs1), diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -116,7 +116,7 @@ DAGOperand xop_kind> : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), - (vop_type (SplatPatKind xop_kind:$rs2)))), + (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))), (!cast(instruction_name#_#suffix#_# vlmul.MX) vop_reg_class:$rs1, xop_kind:$rs2, @@ -219,7 +219,7 @@ foreach vti = AllIntegerVectors in { defvar instruction = !cast(instruction_name#_#kind#_#vti.LMul.MX); def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), - (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)), + (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)), SwapHelper<(instruction), (instruction vti.RegClass:$rs1), (instruction xop_kind:$rs2), @@ -375,7 +375,7 @@ // Handle VRSUB specially since it's the only integer binary op with reversed // pattern operands foreach vti = AllIntegerVectors in { - def : Pat<(sub (vti.Vector (SplatPat GPR:$rs2)), + def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))), (vti.Vector vti.RegClass:$rs1)), (!cast("PseudoVRSUB_VX_"# vti.LMul.MX) vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.SEW)>; @@ -690,7 +690,7 @@ let Predicates = [HasStdExtV] in { foreach vti = AllIntegerVectors in { - def : Pat<(vti.Vector (SplatPat GPR:$rs1)), + def : Pat<(vti.Vector (SplatPat (XLenVT GPR:$rs1))), (!cast("PseudoVMV_V_X_" # vti.LMul.MX) GPR:$rs1, vti.AVL, vti.SEW)>; def : Pat<(vti.Vector (SplatPat_simm5 simm5:$rs1)), diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -254,7 +254,7 @@ (op_type op_reg_class:$rs1), (op_type op_reg_class:$rs2), (mask_type true_mask), - (VLOp GPR:$vl))), + (VLOp (XLenVT GPR:$vl)))), (!cast(instruction_name#"_VV_"# vlmul.MX) op_reg_class:$rs1, 
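Nearly every vector hunk in this diff performs the same rewrite: the AVL operand
(VLOp GPR:$vl) becomes (VLOp (XLenVT GPR:$vl)). The reason is the same GPR
ambiguity as on the scalar side; VLOp is already XLen-typed at its root, but the
GPR leaf beneath it is not. A sketch, assuming VLOp is declared roughly as in
upstream LLVM of this era (verify the exact definition in the tree):

  // The ComplexPattern root type is XLenVT, but the leaf still needs pinning
  // once GPR carries more than one type:
  def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;
  // Hence the post-patch spelling:  (XLenVT (VLOp (XLenVT GPR:$vl)))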
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -254,7 +254,7 @@
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2),
                      (mask_type true_mask),
-                     (VLOp GPR:$vl))),
+                     (VLOp (XLenVT GPR:$vl)))),
     (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
                      op_reg_class:$rs1,
                      op_reg_class:$rs2,
@@ -274,9 +274,9 @@
                          DAGOperand xop_kind> :
   Pat<(result_type (vop
                    (vop_type vop_reg_class:$rs1),
-                   (vop_type (SplatPatKind xop_kind:$rs2)),
+                   (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))),
                    (mask_type true_mask),
-                   (VLOp GPR:$vl))),
+                   (VLOp (XLenVT GPR:$vl)))),
       (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX)
                    vop_reg_class:$rs1,
                    xop_kind:$rs2,
@@ -325,7 +325,7 @@
   Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                    (vop_type (SplatFPOp scalar_reg_class:$rs2)),
                    (mask_type true_mask),
-                   (VLOp GPR:$vl))),
+                   (VLOp (XLenVT GPR:$vl)))),
       (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
                    vop_reg_class:$rs1,
                    scalar_reg_class:$rs2,
@@ -348,7 +348,7 @@
   def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
                               fvti.RegClass:$rs1,
                               (fvti.Mask true_mask),
-                              (VLOp GPR:$vl))),
+                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                 GPR:$vl, fvti.SEW)>;
@@ -359,7 +359,7 @@
   def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                       vti.RegClass:$rs2, cc,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
@@ -372,7 +372,7 @@
   def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
                                       vti.RegClass:$rs1, invcc,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
@@ -382,14 +382,14 @@
                             CondCode cc, CondCode invcc> {
   defvar instruction = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX);
   def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
-                                      (SplatPat GPR:$rs2), cc,
+                                      (SplatPat (XLenVT GPR:$rs2)), cc,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
-  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat GPR:$rs2),
+  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
                                       (vti.Vector vti.RegClass:$rs1), invcc,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
 }
@@ -400,12 +400,12 @@
   def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                       (SplatPat_simm5 simm5:$rs2), cc,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (instruction vti.RegClass:$rs1, XLenVT:$rs2, GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
                                       (vti.Vector vti.RegClass:$rs1), invcc,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.SEW)>;
 }
@@ -417,14 +417,14 @@
                                        fvti.RegClass:$rs2,
                                        cc,
                                        (fvti.Mask true_mask),
-                                       (VLOp GPR:$vl))),
+                                       (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                 fvti.RegClass:$rs1, fvti.RegClass:$rs2, GPR:$vl, fvti.SEW)>;
   def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1),
                                        (SplatFPOp fvti.ScalarRegClass:$rs2),
                                        cc,
                                        (fvti.Mask true_mask),
-                                       (VLOp GPR:$vl))),
+                                       (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                 GPR:$vl, fvti.SEW)>;
@@ -432,7 +432,7 @@
                                        (fvti.Vector fvti.RegClass:$rs1),
                                        cc,
                                        (fvti.Mask true_mask),
-                                       (VLOp GPR:$vl))),
+                                       (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                 GPR:$vl, fvti.SEW)>;
@@ -445,7 +445,7 @@
   defvar vti = vtiTofti.Vti;
   defvar fti = vtiTofti.Fti;
   def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
-                             true_mask, (VLOp GPR:$vl))),
+                             true_mask, (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                 fti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
 }
@@ -456,7 +456,7 @@
   defvar ivti = GetIntVTypeInfo<fvti>.Vti;
   def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                               (fvti.Mask true_mask),
-                              (VLOp GPR:$vl))),
+                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                 fvti.RegClass:$rs1, GPR:$vl, ivti.SEW)>;
 }
@@ -467,7 +467,7 @@
   defvar ivti = GetIntVTypeInfo<fvti>.Vti;
   def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
                               (ivti.Mask true_mask),
-                              (VLOp GPR:$vl))),
+                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                 ivti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
 }
@@ -479,7 +479,7 @@
   defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
   def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                               (fvti.Mask true_mask),
-                              (VLOp GPR:$vl))),
+                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                 fvti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
 }
@@ -491,7 +491,7 @@
   defvar fwti = vtiToWti.Wti;
   def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
                               (ivti.Mask true_mask),
-                              (VLOp GPR:$vl))),
+                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                 ivti.RegClass:$rs1, GPR:$vl, ivti.SEW)>;
 }
@@ -503,7 +503,7 @@
   defvar fwti = vtiToWti.Wti;
   def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
                              (fwti.Mask true_mask),
-                             (VLOp GPR:$vl))),
+                             (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                 fwti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
 }
@@ -515,7 +515,7 @@
   defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
   def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
                               (iwti.Mask true_mask),
-                              (VLOp GPR:$vl))),
+                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                 iwti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
 }
@@ -526,7 +526,7 @@
   defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
   def: Pat<(vti_m1.Vector (vop (vti.Vector vti.RegClass:$rs1), VR:$rs2,
                                (vti.Mask true_mask),
-                               (VLOp GPR:$vl))),
+                               (VLOp (XLenVT GPR:$vl)))),
           (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
               (vti_m1.Vector (IMPLICIT_DEF)),
               (vti.Vector vti.RegClass:$rs1),
@@ -546,21 +546,21 @@
   defvar load_instr = !cast<Instruction>("PseudoVLE"#vti.SEW#"_V_"#vti.LMul.MX);
   defvar store_instr = !cast<Instruction>("PseudoVSE"#vti.SEW#"_V_"#vti.LMul.MX);
   // Load
-  def : Pat<(vti.Vector (riscv_vle_vl BaseAddr:$rs1, (VLOp GPR:$vl))),
+  def : Pat<(vti.Vector (riscv_vle_vl BaseAddr:$rs1, (VLOp (XLenVT GPR:$vl)))),
            (load_instr BaseAddr:$rs1, GPR:$vl, vti.SEW)>;
   // Store
   def : Pat<(riscv_vse_vl (vti.Vector vti.RegClass:$rs2), BaseAddr:$rs1,
-                          (VLOp GPR:$vl)),
+                          (VLOp (XLenVT GPR:$vl))),
            (store_instr vti.RegClass:$rs2, BaseAddr:$rs1, GPR:$vl, vti.SEW)>;
 }

 foreach mti = AllMasks in {
   defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#mti.BX);
   defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#mti.BX);
-  def : Pat<(mti.Mask (riscv_vle_vl BaseAddr:$rs1, (VLOp GPR:$vl))),
+  def : Pat<(mti.Mask (riscv_vle_vl BaseAddr:$rs1, (VLOp (XLenVT GPR:$vl)))),
            (load_instr BaseAddr:$rs1, GPR:$vl, mti.SEW)>;
   def : Pat<(riscv_vse_vl (mti.Mask VR:$rs2), BaseAddr:$rs1,
-                          (VLOp GPR:$vl)),
+                          (VLOp (XLenVT GPR:$vl))),
            (store_instr VR:$rs2, BaseAddr:$rs1, GPR:$vl, mti.SEW)>;
 }
@@ -570,14 +570,14 @@
 // Handle VRSUB specially since it's the only integer binary op with reversed
 // pattern operands
 foreach vti = AllIntegerVectors in {
-  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat GPR:$rs2)),
+  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                           (vti.Vector vti.RegClass:$rs1),
-                          (vti.Mask true_mask), (VLOp GPR:$vl)),
+                          (vti.Mask true_mask), (VLOp (XLenVT GPR:$vl))),
            (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
   def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                           (vti.Vector vti.RegClass:$rs1),
-                          (vti.Mask true_mask), (VLOp GPR:$vl)),
+                          (vti.Mask true_mask), (VLOp (XLenVT GPR:$vl))),
            (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                 vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.SEW)>;
 }
@@ -612,7 +612,7 @@
   defvar fti = vtiTofti.Fti;
   def : Pat<(fti.Vector (riscv_trunc_vector_vl (vti.Vector vti.RegClass:$rs1),
                                                (vti.Mask true_mask),
-                                               (VLOp GPR:$vl))),
+                                               (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVNSRL_WI_"#fti.LMul.MX)
                 vti.RegClass:$rs1, 0, GPR:$vl, fti.SEW)>;
 }
@@ -667,7 +667,7 @@
   def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
                                           vti.RegClass:$rs1,
                                           vti.RegClass:$rs2,
-                                          (VLOp GPR:$vl))),
+                                          (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
                 GPR:$vl, vti.SEW)>;
@@ -675,26 +675,26 @@
   def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
                                           (SplatPat XLenVT:$rs1),
                                           vti.RegClass:$rs2,
-                                          (VLOp GPR:$vl))),
+                                          (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;

   def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
                                           (SplatPat_simm5 simm5:$rs1),
                                           vti.RegClass:$rs2,
-                                          (VLOp GPR:$vl))),
+                                          (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;
 }

 // 12.16. Vector Integer Move Instructions
 foreach vti = AllIntegerVectors in {
-  def : Pat<(vti.Vector (riscv_vmv_v_x_vl GPR:$rs2, (VLOp GPR:$vl))),
+  def : Pat<(vti.Vector (riscv_vmv_v_x_vl GPR:$rs2, (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
             $rs2, GPR:$vl, vti.SEW)>;
   defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
   def : Pat<(vti.Vector (riscv_vmv_v_x_vl (ImmPat XLenVT:$imm5),
-                                          (VLOp GPR:$vl))),
+                                          (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
             XLenVT:$imm5, GPR:$vl, vti.SEW)>;
 }
@@ -738,37 +738,37 @@
   defvar suffix = vti.LMul.MX # "_COMMUTABLE";
   def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd,
                                       vti.RegClass:$rs2, (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd,
                                       (riscv_fneg_vl vti.RegClass:$rs2,
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       vti.RegClass:$rd,
                                       (riscv_fneg_vl vti.RegClass:$rs2,
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       vti.RegClass:$rd, vti.RegClass:$rs2,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
@@ -778,7 +778,7 @@
   def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                       vti.RegClass:$rd, vti.RegClass:$rs2,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFMADD_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
@@ -786,31 +786,31 @@
                                       vti.RegClass:$rd,
                                       (riscv_fneg_vl vti.RegClass:$rs2,
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFMSUB_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                       (riscv_fneg_vl vti.RegClass:$rd,
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       (riscv_fneg_vl vti.RegClass:$rs2,
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                       (riscv_fneg_vl vti.RegClass:$rd,
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       vti.RegClass:$rs2,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
@@ -818,22 +818,22 @@
   // The splat might be negated.
   def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       vti.RegClass:$rd,
                                       (riscv_fneg_vl vti.RegClass:$rs2,
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                                      (vti.Mask true_mask),
-                                                     (VLOp GPR:$vl)),
+                                                     (VLOp (XLenVT GPR:$vl))),
                                       vti.RegClass:$rd, vti.RegClass:$rs2,
                                       (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.SEW)>;
@@ -855,39 +855,39 @@
 foreach vti = AllFloatVectors in {
   // 14.8. Vector Floating-Point Square-Root Instruction
   def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask),
-                            (VLOp GPR:$vl)),
+                            (VLOp (XLenVT GPR:$vl))),
            (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;

   // 14.12. Vector Floating-Point Sign-Injection Instructions
   def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
-                           (VLOp GPR:$vl)),
+                           (VLOp (XLenVT GPR:$vl))),
            (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.SEW)>;
   // Handle fneg with VFSGNJN using the same input for both operands.
   def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
-                           (VLOp GPR:$vl)),
+                           (VLOp (XLenVT GPR:$vl))),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.SEW)>;
   def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                 (vti.Vector vti.RegClass:$rs2),
                                 (vti.Mask true_mask),
-                                (VLOp GPR:$vl)),
+                                (VLOp (XLenVT GPR:$vl))),
            (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
   def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                 (riscv_fneg_vl vti.RegClass:$rs2,
                                                (vti.Mask true_mask),
-                                               (VLOp GPR:$vl)),
+                                               (VLOp (XLenVT GPR:$vl))),
                                 (vti.Mask true_mask),
-                                (VLOp GPR:$vl)),
+                                (VLOp (XLenVT GPR:$vl))),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
   def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                 (SplatFPOp vti.ScalarRegClass:$rs2),
                                 (vti.Mask true_mask),
-                                (VLOp GPR:$vl)),
+                                (VLOp (XLenVT GPR:$vl))),
            (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.SEW)>;
 }
@@ -899,7 +899,7 @@
   def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
                                            fvti.RegClass:$rs1,
                                            fvti.RegClass:$rs2,
-                                           (VLOp GPR:$vl))),
+                                           (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
                 GPR:$vl, fvti.SEW)>;
@@ -907,7 +907,7 @@
   def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
                                            (SplatFPOp fvti.ScalarRegClass:$rs1),
                                            fvti.RegClass:$rs2,
-                                           (VLOp GPR:$vl))),
+                                           (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2,
                 (fvti.Scalar fvti.ScalarRegClass:$rs1),
@@ -916,19 +916,19 @@
   def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
                                            (SplatFPOp (fvti.Scalar fpimm0)),
                                            fvti.RegClass:$rs2,
-                                           (VLOp GPR:$vl))),
+                                           (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2, 0, VMV0:$vm, GPR:$vl, fvti.SEW)>;

 // 14.16. Vector Floating-Point Move Instruction
 // If we're splatting fpimm0, use vmv.v.x vd, x0.
   def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
-                         (fvti.Scalar (fpimm0)), (VLOp GPR:$vl))),
+                         (fvti.Scalar (fpimm0)), (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
             0, GPR:$vl, fvti.SEW)>;
   def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
-                         (fvti.Scalar fvti.ScalarRegClass:$rs2), (VLOp GPR:$vl))),
+                         (fvti.Scalar fvti.ScalarRegClass:$rs2), (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                fvti.LMul.MX)
             (fvti.Scalar fvti.ScalarRegClass:$rs2),
@@ -950,7 +950,7 @@
   defvar fwti = fvtiToFWti.Wti;
   def : Pat<(fwti.Vector (riscv_fpextend_vl (fvti.Vector fvti.RegClass:$rs1),
                                             (fvti.Mask true_mask),
-                                            (VLOp GPR:$vl))),
+                                            (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX)
                 fvti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
 }
@@ -965,13 +965,13 @@
   defvar fwti = fvtiToFWti.Wti;
   def : Pat<(fvti.Vector (riscv_fpround_vl (fwti.Vector fwti.RegClass:$rs1),
                                            (fwti.Mask true_mask),
-                                           (VLOp GPR:$vl))),
+                                           (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
                 fwti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;

   def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1),
                                              (fwti.Mask true_mask),
-                                             (VLOp GPR:$vl))),
+                                             (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX)
                 fwti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
 }
@@ -983,62 +983,62 @@
 foreach mti = AllMasks in {
   // 16.1 Vector Mask-Register Logical Instructions
-  def : Pat<(mti.Mask (riscv_vmset_vl (VLOp GPR:$vl))),
+  def : Pat<(mti.Mask (riscv_vmset_vl (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.SEW)>;
-  def : Pat<(mti.Mask (riscv_vmclr_vl (VLOp GPR:$vl))),
+  def : Pat<(mti.Mask (riscv_vmclr_vl (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.SEW)>;

-  def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, (VLOp GPR:$vl))),
+  def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
-  def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, (VLOp GPR:$vl))),
+  def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
-  def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, (VLOp GPR:$vl))),
+  def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;

   def : Pat<(mti.Mask (riscv_vmand_vl (riscv_vmnot_vl VR:$rs1,
-                                                      (VLOp GPR:$vl)),
-                                      VR:$rs2, (VLOp GPR:$vl))),
+                                                      (VLOp (XLenVT GPR:$vl))),
+                                      VR:$rs2, (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMANDNOT_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
   def : Pat<(mti.Mask (riscv_vmor_vl (riscv_vmnot_vl VR:$rs1,
-                                                     (VLOp GPR:$vl)),
-                                     VR:$rs2, (VLOp GPR:$vl))),
+                                                     (VLOp (XLenVT GPR:$vl))),
+                                     VR:$rs2, (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMORNOT_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
   // XOR is associative so we need 2 patterns for VMXNOR.
   def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
-                                                      (VLOp GPR:$vl)),
-                                      VR:$rs2, (VLOp GPR:$vl))),
+                                                      (VLOp (XLenVT GPR:$vl))),
+                                      VR:$rs2, (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;

   def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
-                                                      (VLOp GPR:$vl)),
-                                      (VLOp GPR:$vl))),
+                                                      (VLOp (XLenVT GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
   def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
-                                                     (VLOp GPR:$vl)),
-                                      (VLOp GPR:$vl))),
+                                                     (VLOp (XLenVT GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
   def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
-                                                      (VLOp GPR:$vl)),
-                                      (VLOp GPR:$vl))),
+                                                      (VLOp (XLenVT GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;

   // Match the not idiom to the vnot.mm pseudo.
-  def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, (VLOp GPR:$vl))),
+  def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                 VR:$rs, VR:$rs, GPR:$vl, mti.SEW)>;

   // 16.2 Vector Mask Population Count vpopc
   def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
-                                    (VLOp GPR:$vl))),
+                                    (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVPOPC_M_" # mti.BX)
                 VR:$rs2, GPR:$vl, mti.SEW)>;
 }
@@ -1051,24 +1051,24 @@
 foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
                                           vti.ScalarRegClass:$rs1,
-                                          (VLOp GPR:$vl))),
+                                          (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                 vti.RegClass:$merge,
                 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.SEW)>;

   def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                               (vti.Vector vti.RegClass:$rs1),
                                               (vti.Mask true_mask),
-                                              (VLOp GPR:$vl))),
+                                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                               (vti.Mask true_mask),
-                                              (VLOp GPR:$vl))),
+                                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
                                               (vti.Mask true_mask),
-                                              (VLOp GPR:$vl))),
+                                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
                 vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.SEW)>;
@@ -1083,7 +1083,7 @@
   def : Pat<(vti.Vector (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                                   (ivti.Vector ivti.RegClass:$rs1),
                                                   (vti.Mask true_mask),
-                                                  (VLOp GPR:$vl))),
+                                                  (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(inst)
                 vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
 }
@@ -1097,7 +1097,7 @@
 foreach vti = AllFloatVectors in {
   def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                            vti.ScalarRegClass:$rs1,
-                                           (VLOp GPR:$vl))),
+                                           (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 vti.RegClass:$merge,
                 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.SEW)>;
@@ -1105,17 +1105,17 @@
   def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                               (ivti.Vector vti.RegClass:$rs1),
                                               (vti.Mask true_mask),
-                                              (VLOp GPR:$vl))),
+                                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                               (vti.Mask true_mask),
-                                              (VLOp GPR:$vl))),
+                                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
                                               (vti.Mask true_mask),
-                                              (VLOp GPR:$vl))),
+                                              (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
                 vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.SEW)>;
@@ -1129,7 +1129,7 @@
   def : Pat<(vti.Vector (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                                   (ivti.Vector ivti.RegClass:$rs1),
                                                   (vti.Mask true_mask),
-                                                  (VLOp GPR:$vl))),
+                                                  (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>(inst)
                 vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
 }
@@ -1163,17 +1163,17 @@
 foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask true_mask),
-                                      (VLOp GPR:$vl))),
+                                      (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.SEW)>;

   def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rs1),
                                            GPR:$rs2, (vti.Mask true_mask),
-                                           (VLOp GPR:$vl))),
+                                           (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rs1),
                                              GPR:$rs2, (vti.Mask true_mask),
-                                             (VLOp GPR:$vl))),
+                                             (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
 }
@@ -1182,7 +1182,7 @@
   def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
                                           (vti.Vector vti.RegClass:$rs1),
                                           uimm5:$rs2, (vti.Mask true_mask),
-                                          (VLOp GPR:$vl))),
+                                          (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
                 GPR:$vl, vti.SEW)>;
@@ -1190,7 +1190,7 @@
   def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
                                           (vti.Vector vti.RegClass:$rs1),
                                           GPR:$rs2, (vti.Mask true_mask),
-                                          (VLOp GPR:$vl))),
+                                          (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
                 GPR:$vl, vti.SEW)>;
@@ -1198,7 +1198,7 @@
   def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
                                             (vti.Vector vti.RegClass:$rs1),
                                             uimm5:$rs2, (vti.Mask true_mask),
-                                            (VLOp GPR:$vl))),
+                                            (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
                 GPR:$vl, vti.SEW)>;
@@ -1206,7 +1206,7 @@
   def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
                                             (vti.Vector vti.RegClass:$rs1),
                                             GPR:$rs2, (vti.Mask true_mask),
-                                            (VLOp GPR:$vl))),
+                                            (VLOp (XLenVT GPR:$vl)))),
            (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
                 GPR:$vl, vti.SEW)>;
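The splat-related rewrites in the two vector pattern files follow the same rule:
SplatPat's scalar operand is always an XLen-wide register regardless of the
element width being splatted, so its GPR leaf is pinned as well. Restated from
the VSDPatterns hunk with explanatory comments (illustrative, not new code):

  // vmv.v.x splats the low SEW bits of an XLen-wide scalar register; the
  // (XLenVT ...) cast disambiguates the GPR leaf, not the element type.
  def : Pat<(vti.Vector (SplatPat (XLenVT GPR:$rs1))),
            (!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX)
             GPR:$rs1, vti.AVL, vti.SEW)>;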
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -247,8 +247,8 @@
//===----------------------------------------------------------------------===//

 /// Generic pattern classes
-class PatFpr16Fpr16<SDPatternOperator OpNode, RVInstR Inst>
-    : Pat<(OpNode FPR16:$rs1, FPR16:$rs2), (Inst $rs1, $rs2)>;
+class PatFpr16Fpr16<SDPatternOperator OpNode, RVInstR Inst, ValueType vt = XLenVT>
+    : Pat<(vt (OpNode FPR16:$rs1, FPR16:$rs2)), (Inst $rs1, $rs2)>;

 class PatFpr16Fpr16DynFrm<SDPatternOperator OpNode, RVInstRFrm Inst>
     : Pat<(OpNode FPR16:$rs1, FPR16:$rs2), (Inst $rs1, $rs2, 0b111)>;
@@ -256,7 +256,7 @@
 let Predicates = [HasStdExtZfh] in {

 /// Float constants
-def : Pat<(f16 (fpimm0)), (FMV_H_X X0)>;
+def : Pat<(f16 (fpimm0)), (FMV_H_X (XLenVT X0))>;

 /// Float conversion operations

@@ -275,7 +275,7 @@
 def : Pat<(fneg FPR16:$rs1), (FSGNJN_H $rs1, $rs1)>;
 def : Pat<(fabs FPR16:$rs1), (FSGNJX_H $rs1, $rs1)>;

-def : PatFpr16Fpr16<fcopysign, FSGNJ_H>;
+def : PatFpr16Fpr16<fcopysign, FSGNJ_H, f16>;
 def : Pat<(fcopysign FPR16:$rs1, (fneg FPR16:$rs2)), (FSGNJN_H $rs1, $rs2)>;
 def : Pat<(fcopysign FPR16:$rs1, FPR32:$rs2),
           (FSGNJ_H $rs1, (FCVT_H_S $rs2, 0b111))>;
@@ -297,8 +297,8 @@
 def : Pat<(fma (fneg FPR16:$rs1), FPR16:$rs2, (fneg FPR16:$rs3)),
           (FNMADD_H FPR16:$rs1, FPR16:$rs2, FPR16:$rs3, 0b111)>;

-def : PatFpr16Fpr16<fminnum, FMIN_H>;
-def : PatFpr16Fpr16<fmaxnum, FMAX_H>;
+def : PatFpr16Fpr16<fminnum, FMIN_H, f16>;
+def : PatFpr16Fpr16<fmaxnum, FMAX_H, f16>;

 /// Setcc

@@ -309,7 +309,7 @@
 def : PatFpr16Fpr16<setle, FLE_H>;
 def : PatFpr16Fpr16<setole, FLE_H>;

-def Select_FPR16_Using_CC_GPR : SelectCC_rrirr<FPR16, GPR>;
+def Select_FPR16_Using_CC_GPR : SelectCC_rrirr<FPR16, GPR, XLenVT, f16>;

 /// Loads
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -117,13 +117,23 @@
 def XLenVT : ValueTypeByHwMode<[RV32, RV64],
                                [i32, i64]>;

+def XVEI8VT : ValueTypeByHwMode<[RV32, RV64],
+                                [v4i8, v8i8]>;
+
+def XVEI16VT : ValueTypeByHwMode<[RV32, RV64],
+                                 [v2i16, v4i16]>;
+
+def XVEI32VT : ValueTypeByHwMode<[RV32, RV64],
+                                 [i32, v2i32]>;
+
 def XLenRI : RegInfoByHwMode<
       [RV32, RV64],
       [RegInfo<32,32,32>, RegInfo<64,64,64>]>;

 // The order of registers represents the preferred allocation sequence.
 // Registers are listed in the order caller-save, callee-save, specials.
-def GPR : RegisterClass<"RISCV", [XLenVT], 32, (add
+def GPR : RegisterClass<"RISCV", [XLenVT, XVEI8VT, XVEI16VT, XVEI32VT], 32, (add
     (sequence "X%u", 10, 17),
     (sequence "X%u", 5, 7),
     (sequence "X%u", 28, 31),
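The RISCVRegisterInfo.td hunk is what forces all of the casts above: GPR now
carries packed vector types, selected per hardware mode, alongside XLenVT. Note
that XVEI32VT degenerates to the scalar i32 on RV32 (a 32-bit GPR holds exactly
one 32-bit element), which is precisely the overlap that makes untyped GPR
leaves ambiguous. A self-contained sketch of the ValueTypeByHwMode mechanism,
using an illustrative name that is not part of the patch:

  // Picks the first type on RV32 targets and the second on RV64 targets.
  def IllustrativeVT : ValueTypeByHwMode<[RV32, RV64], [v2i16, v4i16]>;
  // A register class listing several such types leaves leaf types
  // underdetermined, hence the explicit casts throughout this patch.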