diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -126,7 +126,7 @@
     addRegisterClass(MVT::f32, &RISCV::GPRF32RegClass);
   if (Subtarget.hasStdExtZdinx()) {
     if (Subtarget.is64Bit())
-      addRegisterClass(MVT::f64, &RISCV::GPRF64RegClass);
+      addRegisterClass(MVT::f64, &RISCV::GPRRegClass);
     else
       addRegisterClass(MVT::f64, &RISCV::GPRPF64RegClass);
   }
@@ -13827,7 +13827,7 @@
     I2FOpc = RISCV::FCVT_D_L_INX;
     FSGNJOpc = RISCV::FSGNJ_D_INX;
     FSGNJXOpc = RISCV::FSGNJX_D_INX;
-    RC = &RISCV::GPRF64RegClass;
+    RC = &RISCV::GPRRegClass;
     break;
   }
@@ -16016,7 +16016,6 @@
   // Subtarget into account.
   if (Res.second == &RISCV::GPRF16RegClass ||
       Res.second == &RISCV::GPRF32RegClass ||
-      Res.second == &RISCV::GPRF64RegClass ||
      Res.second == &RISCV::GPRPF64RegClass)
    return std::make_pair(Res.first, &RISCV::GPRRegClass);

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1203,9 +1203,9 @@
 /// Generic pattern classes
 class PatGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
-    : Pat<(vt (OpNode GPR:$rs1)), (Inst GPR:$rs1)>;
+    : Pat<(vt (OpNode (vt GPR:$rs1))), (Inst GPR:$rs1)>;
 class PatGprGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
-    : Pat<(vt (OpNode GPR:$rs1, GPR:$rs2)), (Inst GPR:$rs1, GPR:$rs2)>;
+    : Pat<(vt (OpNode (vt GPR:$rs1), (vt GPR:$rs2))), (Inst GPR:$rs1, GPR:$rs2)>;

 class PatGprImm<SDPatternOperator OpNode, RVInst Inst, ImmLeaf ImmType>
     : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1), ImmType:$imm)),
@@ -1322,7 +1322,7 @@
 /// FrameIndex calculations

-def : Pat<(FrameAddrRegImm GPR:$rs1, simm12:$imm12),
+def : Pat<(FrameAddrRegImm (iPTR GPR:$rs1), simm12:$imm12),
           (ADDI GPR:$rs1, simm12:$imm12)>;

 /// HI and ADD_LO address nodes.
@@ -1365,9 +1365,9 @@
 // Define pattern expansions for setcc operations that aren't directly
 // handled by a RISC-V instruction.
-def : Pat<(riscv_seteq GPR:$rs1), (SLTIU GPR:$rs1, 1)>;
-def : Pat<(riscv_setne GPR:$rs1), (SLTU X0, GPR:$rs1)>;
-def : Pat<(setne GPR:$rs1, -1), (SLTIU GPR:$rs1, -1)>;
+def : Pat<(riscv_seteq (XLenVT GPR:$rs1)), (SLTIU GPR:$rs1, 1)>;
+def : Pat<(riscv_setne (XLenVT GPR:$rs1)), (SLTU (XLenVT X0), GPR:$rs1)>;
+def : Pat<(XLenVT (setne (XLenVT GPR:$rs1), -1)), (SLTIU GPR:$rs1, -1)>;

 def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{
   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
@@ -1390,8 +1390,9 @@
                 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                      GPR:$falsev, GPR:$truev),
                 [(set GPR:$dst,
-                      (riscv_selectcc_frag:$cc GPR:$lhs, GPR:$rhs,
-                                               cond, GPR:$truev,
+                      (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs),
+                                               GPR:$rhs, cond,
+                                               (XLenVT GPR:$truev),
                                                GPR:$falsev))]>,
            Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
 }
@@ -1434,29 +1435,29 @@
            Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
 }

-multiclass SelectCC_GPR_rrirr<DAGOperand valty> {
+multiclass SelectCC_GPR_rrirr<DAGOperand valty, ValueType vt> {
   let usesCustomInserter = 1 in
   def _Using_CC_GPR : Pseudo<(outs valty:$dst),
                              (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                                   valty:$truev, valty:$falsev),
                              [(set valty:$dst,
-                               (riscv_selectcc_frag:$cc GPR:$lhs, GPR:$rhs, cond,
-                                                        valty:$truev, valty:$falsev))]>;
+                               (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), GPR:$rhs, cond,
+                                                        (vt valty:$truev), valty:$falsev))]>;

   // Explicitly select 0 in the condition to X0. The register coalescer doesn't
   // always do it.
-  def : Pat<(riscv_selectcc_frag:$cc GPR:$lhs, 0, cond, valty:$truev,
+  def : Pat<(riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), 0, cond, (vt valty:$truev),
             valty:$falsev),
-            (!cast<Instruction>(NAME#"_Using_CC_GPR") GPR:$lhs, X0,
+            (!cast<Instruction>(NAME#"_Using_CC_GPR") GPR:$lhs, (XLenVT X0),
             (IntCCtoRISCVCC $cc), valty:$truev, valty:$falsev)>;
 }

 let Predicates = [NoShortForwardBranchOpt] in
-defm Select_GPR : SelectCC_GPR_rrirr<GPR>;
+defm Select_GPR : SelectCC_GPR_rrirr<GPR, XLenVT>;

 class SelectCompressOpt<CondCode Cond>
-    : Pat<(riscv_selectcc_frag:$select GPR:$lhs, simm12_no6:$Constant, Cond,
-                                       GPR:$truev, GPR:$falsev),
-          (Select_GPR_Using_CC_GPR (ADDI GPR:$lhs, (NegImm simm12:$Constant)), X0,
+    : Pat<(riscv_selectcc_frag:$select (XLenVT GPR:$lhs), simm12_no6:$Constant, Cond,
+                                       (XLenVT GPR:$truev), GPR:$falsev),
+          (Select_GPR_Using_CC_GPR (ADDI GPR:$lhs, (NegImm simm12:$Constant)), (XLenVT X0),
                                    (IntCCtoRISCVCC $select), GPR:$truev, GPR:$falsev)>;

 def OptForMinSize : Predicate<"MF ? MF->getFunction().hasMinSize() : false">;
@@ -1470,16 +1471,16 @@
 // Match `riscv_brcc` and lower to the appropriate RISC-V branch instruction.
 multiclass BccPat<CondCode Cond, RVInstB Inst> {
-  def : Pat<(riscv_brcc GPR:$rs1, GPR:$rs2, Cond, bb:$imm12),
+  def : Pat<(riscv_brcc (XLenVT GPR:$rs1), GPR:$rs2, Cond, bb:$imm12),
             (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>;
   // Explicitly select 0 to X0. The register coalescer doesn't always do it.
-  def : Pat<(riscv_brcc GPR:$rs1, 0, Cond, bb:$imm12),
-            (Inst GPR:$rs1, X0, simm13_lsb0:$imm12)>;
+  def : Pat<(riscv_brcc (XLenVT GPR:$rs1), 0, Cond, bb:$imm12),
+            (Inst GPR:$rs1, (XLenVT X0), simm13_lsb0:$imm12)>;
 }

 class BrccCompressOpt<CondCode Cond, RVInstB Inst>
     : Pat<(riscv_brcc GPR:$lhs, simm12_no6:$Constant, Cond, bb:$place),
-          (Inst (ADDI GPR:$lhs, (NegImm simm12:$Constant)), X0, bb:$place)>;
+          (Inst (ADDI GPR:$lhs, (NegImm simm12:$Constant)), (XLenVT X0), bb:$place)>;

 defm : BccPat<SETEQ, BEQ>;
 defm : BccPat<SETNE, BNE>;
@@ -1552,8 +1553,8 @@
 def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
 def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;

-def : Pat<(riscv_sret_glue), (SRET X0, X0)>;
-def : Pat<(riscv_mret_glue), (MRET X0, X0)>;
+def : Pat<(riscv_sret_glue), (SRET (XLenVT X0), (XLenVT X0))>;
+def : Pat<(riscv_mret_glue), (MRET (XLenVT X0), (XLenVT X0))>;

 let isCall = 1, Defs = [X1] in
 def PseudoCALLIndirect : Pseudo<(outs), (ins GPRJALR:$rs1),
@@ -1606,7 +1607,7 @@
 def PseudoLGA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                        "lga", "$dst, $src">;

-def : Pat<(riscv_lga tglobaladdr:$in), (PseudoLGA tglobaladdr:$in)>;
+def : Pat<(iPTR (riscv_lga tglobaladdr:$in)), (PseudoLGA tglobaladdr:$in)>;

 let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
     isAsmParserOnly = 1 in
@@ -1618,7 +1619,7 @@
 def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                              "la.tls.ie", "$dst, $src">;

-def : Pat<(riscv_la_tls_ie tglobaltlsaddr:$in),
+def : Pat<(iPTR (riscv_la_tls_ie tglobaltlsaddr:$in)),
           (PseudoLA_TLS_IE tglobaltlsaddr:$in)>;

 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0,
@@ -1650,7 +1651,7 @@
 /// Loads

 class LdPat<PatFrag LoadOp, RVInst Inst, ValueType vt = XLenVT>
-    : Pat<(vt (LoadOp (AddrRegImm GPR:$rs1, simm12:$imm12))),
+    : Pat<(vt (LoadOp (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12))),
           (Inst GPR:$rs1, simm12:$imm12)>;

 def : LdPat<sextloadi8, LB>;
@@ -1665,7 +1666,8 @@
 class StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
             ValueType vt>
-    : Pat<(StoreOp (vt StTy:$rs2), (AddrRegImm GPR:$rs1, simm12:$imm12)),
+    : Pat<(StoreOp (vt StTy:$rs2), (AddrRegImm (XLenVT GPR:$rs1),
+                                               simm12:$imm12)),
           (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;

 def : StPat<truncstorei8, SB, GPR, XLenVT>;
@@ -1700,7 +1702,7 @@
 class ReadSysReg<SysReg SR, list<Register> Regs>
   : Pseudo<(outs GPR:$rd), (ins),
-           [(set GPR:$rd, (riscv_read_csr (XLenVT SR.Encoding)))]>,
+           [(set GPR:$rd, (XLenVT (riscv_read_csr (XLenVT SR.Encoding))))]>,
   PseudoInstExpansion<(CSRRS GPR:$rd, SR.Encoding, X0)> {
   let hasSideEffects = 0;
   let Uses = Regs;
@@ -1708,7 +1710,7 @@
 class WriteSysReg<SysReg SR, list<Register> Regs>
   : Pseudo<(outs), (ins GPR:$val),
-           [(riscv_write_csr (XLenVT SR.Encoding), GPR:$val)]>,
+           [(riscv_write_csr (XLenVT SR.Encoding), (XLenVT GPR:$val))]>,
   PseudoInstExpansion<(CSRRW X0, SR.Encoding, GPR:$val)> {
   let hasSideEffects = 0;
   let Defs = Regs;
@@ -1724,7 +1726,7 @@
 class SwapSysReg<SysReg SR, list<Register> Regs>
   : Pseudo<(outs GPR:$rd), (ins GPR:$val),
-           [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), GPR:$val))]>,
+           [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), (XLenVT GPR:$val)))]>,
   PseudoInstExpansion<(CSRRW GPR:$rd, SR.Encoding, GPR:$val)> {
   let hasSideEffects = 0;
   let Uses = Regs;
@@ -1733,7 +1735,7 @@
 class SwapSysRegImm<SysReg SR, list<Register> Regs>
   : Pseudo<(outs GPR:$rd), (ins uimm5:$val),
-           [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), uimm5:$val))]>,
+           [(set GPR:$rd, (XLenVT (riscv_swap_csr (XLenVT SR.Encoding), uimm5:$val)))]>,
   PseudoInstExpansion<(CSRRWI GPR:$rd, SR.Encoding, uimm5:$val)> {
   let hasSideEffects = 0;
   let Uses = Regs;
@@ -1860,7 +1862,7 @@
 /// readcyclecounter
 // On RV64, we can directly read the 64-bit "cycle" CSR.
 let Predicates = [IsRV64] in
-def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, X0)>;
+def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, (XLenVT X0))>;
 // On RV32, ReadCycleWide will be expanded to the suggested loop reading both
 // halves of the 64-bit "cycle" CSR.
 let Predicates = [IsRV32], usesCustomInserter = 1, hasNoSchedulingInfo = 1 in
@@ -1886,7 +1888,7 @@
                               (i32 timm:$accessinfo))]>;

 /// Simple optimization
-def : Pat<(add GPR:$rs1, (AddiPair:$rs2)),
+def : Pat<(XLenVT (add GPR:$rs1, (AddiPair:$rs2))),
           (ADDI (ADDI GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2)),
                 (AddiPairImmSmall GPR:$rs2))>;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -45,7 +45,8 @@
 class AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                   ValueType vt = XLenVT>
-    : Pat<(StoreOp (AddrRegImm GPR:$rs1, simm12:$imm12), (vt StTy:$rs2)),
+    : Pat<(StoreOp (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12),
+                   (vt StTy:$rs2)),
           (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;

 //===----------------------------------------------------------------------===//
@@ -149,16 +150,16 @@
 defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
 defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;

-def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
-          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
-          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
-          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
-          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
-          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr)),
+          (AMOADD_W GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_acquire GPR:$addr, GPR:$incr)),
+          (AMOADD_W_AQ GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_release GPR:$addr, GPR:$incr)),
+          (AMOADD_W_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr)),
+          (AMOADD_W_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr)),
+          (AMOADD_W_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;

 /// Pseudo AMOs
@@ -174,15 +175,15 @@
 def PseudoAtomicLoadNand32 : PseudoAMO;
 // Ordering constants must be kept in sync with the AtomicOrdering enum in
 // AtomicOrdering.h.
-def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
-def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_acquire GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
-def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_release GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
-def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
-def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;

 class PseudoMaskedAMO
@@ -322,15 +323,15 @@
 /// 64-bit AMOs

 def : Pat<(i64 (atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr)),
-          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_acquire GPR:$addr, GPR:$incr)),
-          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_AQ GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_release GPR:$addr, GPR:$incr)),
-          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr)),
-          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr)),
-          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;

 /// 64-bit pseudo AMOs

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -45,7 +45,7 @@
   let RenderMethod = "addRegOperands";
 }

-def FPR64INX : RegisterOperand<GPRF64> {
+def FPR64INX : RegisterOperand<GPR> {
   let ParserMatchClass = GPRF64AsFPR;
   let DecoderMethod = "DecodeGPRRegisterClass";
 }
@@ -394,76 +394,76 @@
 // Match non-signaling FEQ_D
 foreach Ext = DExts in {
-  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_D,  Ext>;
-  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_D,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLT,  FLT_D,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLT, FLT_D,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLE,  FLE_D,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLE, FLE_D,  Ext>;
+  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_D,  Ext, f64>;
+  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_D,  Ext, f64>;
+  defm : PatSetCC_m<strict_fsetcc, SETLT,  FLT_D,  Ext, f64>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLT, FLT_D,  Ext, f64>;
+  defm : PatSetCC_m<strict_fsetcc, SETLE,  FLE_D,  Ext, f64>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLE, FLE_D,  Ext, f64>;
 }

 let Predicates = [HasStdExtD] in {
 // Match signaling FEQ_D
-def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETEQ)),
           (AND (FLE_D $rs1, $rs2),
                (FLE_D $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETOEQ)),
           (AND (FLE_D $rs1, $rs2),
                (FLE_D $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETEQ)),
           (FLE_D $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETOEQ)),
           (FLE_D $rs1, $rs1)>;

-def : PatSetCC<FPR64, strict_fsetccs, SETLT,  FLT_D>;
-def : PatSetCC<FPR64, strict_fsetccs, SETOLT, FLT_D>;
-def : PatSetCC<FPR64, strict_fsetccs, SETLE,  FLE_D>;
-def : PatSetCC<FPR64, strict_fsetccs, SETOLE, FLE_D>;
+def : PatSetCC<FPR64, strict_fsetccs, SETLT,  FLT_D, f64>;
+def : PatSetCC<FPR64, strict_fsetccs, SETOLT, FLT_D, f64>;
+def : PatSetCC<FPR64, strict_fsetccs, SETLE,  FLE_D, f64>;
+def : PatSetCC<FPR64, strict_fsetccs, SETOLE, FLE_D, f64>;
 } // Predicates = [HasStdExtD]

 let Predicates = [HasStdExtZdinx, IsRV64] in {
 // Match signaling FEQ_D
-def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs2, SETEQ)),
           (AND (FLE_D_INX $rs1, $rs2),
                (FLE_D_INX $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs2, SETOEQ)),
           (AND (FLE_D_INX $rs1, $rs2),
                (FLE_D_INX $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs1, SETEQ)),
           (FLE_D_INX $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs1, SETOEQ)),
           (FLE_D_INX $rs1, $rs1)>;

-def : PatSetCC<FPR64INX, strict_fsetccs, SETLT,  FLT_D_INX>;
-def : PatSetCC<FPR64INX, strict_fsetccs, SETOLT, FLT_D_INX>;
-def : PatSetCC<FPR64INX, strict_fsetccs, SETLE,  FLE_D_INX>;
-def : PatSetCC<FPR64INX, strict_fsetccs, SETOLE, FLE_D_INX>;
+def : PatSetCC<FPR64INX, strict_fsetccs, SETLT,  FLT_D_INX, f64>;
+def : PatSetCC<FPR64INX, strict_fsetccs, SETOLT, FLT_D_INX, f64>;
+def : PatSetCC<FPR64INX, strict_fsetccs, SETLE,  FLE_D_INX, f64>;
+def : PatSetCC<FPR64INX, strict_fsetccs, SETOLE, FLE_D_INX, f64>;
 } // Predicates = [HasStdExtZdinx, IsRV64]

 let Predicates = [HasStdExtZdinx, IsRV32] in {
 // Match signaling FEQ_D
-def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETEQ)),
           (AND (FLE_D_IN32X $rs1, $rs2),
                (FLE_D_IN32X $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETOEQ)),
           (AND (FLE_D_IN32X $rs1, $rs2),
                (FLE_D_IN32X $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETEQ)),
           (FLE_D_IN32X $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETOEQ)),
           (FLE_D_IN32X $rs1, $rs1)>;

-def : PatSetCC<FPR64IN32X, strict_fsetccs, SETLT,  FLT_D_IN32X>;
-def : PatSetCC<FPR64IN32X, strict_fsetccs, SETOLT, FLT_D_IN32X>;
-def : PatSetCC<FPR64IN32X, strict_fsetccs, SETLE,  FLE_D_IN32X>;
-def : PatSetCC<FPR64IN32X, strict_fsetccs, SETOLE, FLE_D_IN32X>;
+def : PatSetCC<FPR64IN32X, strict_fsetccs, SETLT,  FLT_D_IN32X, f64>;
+def : PatSetCC<FPR64IN32X, strict_fsetccs, SETOLT, FLT_D_IN32X, f64>;
+def : PatSetCC<FPR64IN32X, strict_fsetccs, SETLE,  FLE_D_IN32X, f64>;
+def : PatSetCC<FPR64IN32X, strict_fsetccs, SETOLE, FLE_D_IN32X, f64>;
 } // Predicates = [HasStdExtZdinx, IsRV32]

 let Predicates = [HasStdExtD] in {
-defm Select_FPR64 : SelectCC_GPR_rrirr<FPR64>;
+defm Select_FPR64 : SelectCC_GPR_rrirr<FPR64, f64>;

 def PseudoFROUND_D : PseudoFROUND<FPR64>;
@@ -492,34 +492,32 @@
 } // Predicates = [HasStdExtD]

 let Predicates = [HasStdExtZdinx, IsRV64] in {
-defm Select_FPR64INX : SelectCC_GPR_rrirr<FPR64INX>;
+defm Select_FPR64INX : SelectCC_GPR_rrirr<FPR64INX, f64>;

 def PseudoFROUND_D_INX : PseudoFROUND<FPR64INX>;

 /// Loads
-def : Pat<(f64 (load (AddrRegImm GPR:$rs1, simm12:$imm12))),
-          (COPY_TO_REGCLASS (LD GPR:$rs1, simm12:$imm12), GPRF64)>;
+def : LdPat<load, LD, f64>;

 /// Stores
-def : Pat<(store (f64 FPR64INX:$rs2), (AddrRegImm GPR:$rs1, simm12:$imm12)),
-          (SD (COPY_TO_REGCLASS FPR64INX:$rs2, GPR), GPR:$rs1, simm12:$imm12)>;
+def : StPat<store, SD, GPR, f64>;
 } // Predicates = [HasStdExtZdinx, IsRV64]

 let Predicates = [HasStdExtZdinx, IsRV32] in {
-defm Select_FPR64IN32X : SelectCC_GPR_rrirr<FPR64IN32X>;
+defm Select_FPR64IN32X : SelectCC_GPR_rrirr<FPR64IN32X, f64>;

 def PseudoFROUND_D_IN32X : PseudoFROUND<FPR64IN32X>;

 /// Loads
 let isCall = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 1 in
 def PseudoRV32ZdinxLD : Pseudo<(outs GPRPF64:$dst), (ins GPR:$rs1, simm12:$imm12), []>;
-def : Pat<(f64 (load (AddrRegImmINX GPR:$rs1, simm12:$imm12))),
+def : Pat<(f64 (load (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12))),
           (PseudoRV32ZdinxLD GPR:$rs1, simm12:$imm12)>;

 /// Stores
 let isCall = 0, mayLoad = 0, mayStore = 1, Size = 8, isCodeGenOnly = 1 in
 def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRPF64:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>;
-def : Pat<(store (f64 GPRPF64:$rs2), (AddrRegImmINX GPR:$rs1, simm12:$imm12)),
+def : Pat<(store (f64 GPRPF64:$rs2), (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12)),
           (PseudoRV32ZdinxSD GPRPF64:$rs2, GPR:$rs1, simm12:$imm12)>;

 /// Pseudo-instructions needed for the soft-float ABI with RV32D
@@ -619,8 +617,8 @@
 let Predicates = [HasStdExtZdinx, IsRV64] in {

 // Moves (no conversion)
-def : Pat<(f64 (bitconvert (i64 GPR:$rs1))), (COPY_TO_REGCLASS GPR:$rs1, GPRF64)>;
-def : Pat<(i64 (bitconvert FPR64INX:$rs1)), (COPY_TO_REGCLASS FPR64INX:$rs1, GPR)>;
+def : Pat<(f64 (bitconvert (i64 GPR:$rs1))), (COPY_TO_REGCLASS GPR:$rs1, GPR)>;
+def : Pat<(i64 (bitconvert (f64 GPR:$rs1))), (COPY_TO_REGCLASS GPR:$rs1, GPR)>;

 // Use target specific isd nodes to help us remember the result is sign
 // extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -436,13 +436,13 @@
 /// Generic pattern classes
 class PatSetCC<DAGOperand Ty, SDPatternOperator OpNode, CondCode Cond,
-               RVInst Inst>
-    : Pat<(OpNode Ty:$rs1, Ty:$rs2, Cond), (Inst $rs1, $rs2)>;
+               RVInst Inst, ValueType vt>
+    : Pat<(XLenVT (OpNode (vt Ty:$rs1), Ty:$rs2, Cond)), (Inst $rs1, $rs2)>;
 multiclass PatSetCC_m<SDPatternOperator OpNode, CondCode Cond,
-                      RVInst Inst, ExtInfo Ext> {
+                      RVInst Inst, ExtInfo Ext, ValueType vt> {
   let Predicates = Ext.Predicates in
   def Ext.Suffix : PatSetCC<Ext.PrimaryTy, OpNode, Cond,
-                            !cast<RVInst>(Inst#Ext.Suffix)>;
+                            !cast<RVInst>(Inst#Ext.Suffix), vt>;
 }

 class PatFprFpr;

 foreach Ext = FExts in {
-  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_S,  Ext>;
-  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_S,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLT,  FLT_S,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLT, FLT_S,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLE,  FLE_S,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLE, FLE_S,  Ext>;
+  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_S,  Ext, f32>;
+  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_S,  Ext, f32>;
+  defm : PatSetCC_m<strict_fsetcc, SETLT,  FLT_S,  Ext, f32>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLT, FLT_S,  Ext, f32>;
+  defm : PatSetCC_m<strict_fsetcc, SETLE,  FLE_S,  Ext, f32>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLE, FLE_S,  Ext, f32>;
 }

 let Predicates = [HasStdExtF] in {
 // Match signaling FEQ_S
-def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETEQ)),
           (AND (FLE_S $rs1, $rs2),
                (FLE_S $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETOEQ)),
           (AND (FLE_S $rs1, $rs2),
                (FLE_S $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETEQ)),
           (FLE_S $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETOEQ)),
           (FLE_S $rs1, $rs1)>;
 } // Predicates = [HasStdExtF]

 let Predicates = [HasStdExtZfinx] in {
 // Match signaling FEQ_S
-def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETEQ)),
           (AND (FLE_S_INX $rs1, $rs2),
                (FLE_S_INX $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETOEQ)),
           (AND (FLE_S_INX $rs1, $rs2),
                (FLE_S_INX $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETEQ)),
           (FLE_S_INX $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETOEQ)),
           (FLE_S_INX $rs1, $rs1)>;
 } // Predicates = [HasStdExtZfinx]

 foreach Ext = FExts in {
-  defm : PatSetCC_m<strict_fsetccs, SETLT,  FLT_S, Ext>;
-  defm : PatSetCC_m<strict_fsetccs, SETOLT, FLT_S, Ext>;
-  defm : PatSetCC_m<strict_fsetccs, SETLE,  FLE_S, Ext>;
-  defm : PatSetCC_m<strict_fsetccs, SETOLE, FLE_S, Ext>;
+  defm : PatSetCC_m<strict_fsetccs, SETLT,  FLT_S, Ext, f32>;
+  defm : PatSetCC_m<strict_fsetccs, SETOLT, FLT_S, Ext, f32>;
+  defm : PatSetCC_m<strict_fsetccs, SETLE,  FLE_S, Ext, f32>;
+  defm : PatSetCC_m<strict_fsetccs, SETOLE, FLE_S, Ext, f32>;
 }

 let Predicates = [HasStdExtF] in {
-defm Select_FPR32 : SelectCC_GPR_rrirr<FPR32>;
+defm Select_FPR32 : SelectCC_GPR_rrirr<FPR32, f32>;

 def PseudoFROUND_S : PseudoFROUND<FPR32>;
@@ -623,16 +623,16 @@
 } // Predicates = [HasStdExtF]

 let Predicates = [HasStdExtZfinx] in {
-defm Select_FPR32INX : SelectCC_GPR_rrirr<FPR32INX>;
+defm Select_FPR32INX : SelectCC_GPR_rrirr<FPR32INX, f32>;

 def PseudoFROUND_S_INX : PseudoFROUND<FPR32INX>;

 /// Loads
-def : Pat<(f32 (load (AddrRegImm GPR:$rs1, simm12:$imm12))),
+def : Pat<(f32 (load (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12))),
           (COPY_TO_REGCLASS (LW GPR:$rs1, simm12:$imm12), GPRF32)>;

 /// Stores
-def : Pat<(store (f32 FPR32INX:$rs2), (AddrRegImm GPR:$rs1, simm12:$imm12)),
+def : Pat<(store (f32 FPR32INX:$rs2), (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12)),
           (SW (COPY_TO_REGCLASS FPR32INX:$rs2, GPR), GPR:$rs1, simm12:$imm12)>;
 } // Predicates = [HasStdExtZfinx]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6862,7 +6862,7 @@
 foreach vti = AllIntegerVectors in {
   let Predicates = GetVTypePredicates<vti>.Predicates in
-  def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
+  def : Pat<(XLenVT (riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2))),
             (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
   // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
 }
@@ -6884,7 +6884,7 @@
     def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                            (fvti.Scalar (fpimm0)), VLOpFrag)),
               (!cast<Instruction>("PseudoVMV_S_X_" # fvti.LMul.MX)
-                  (fvti.Vector $rs1), X0, GPR:$vl, fvti.Log2SEW)>;
+                  (fvti.Vector $rs1), (XLenVT X0), GPR:$vl, fvti.Log2SEW)>;
   }
 }

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -107,7 +107,7 @@
                             bit isSEWAware = 0>
     : Pat<(result_type (vop
                         (vop_type vop_reg_class:$rs1),
-                        (vop_type (SplatPatKind xop_kind:$rs2)))),
+                        (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))),
           (!cast<Instruction>(
                      !if(isSEWAware,
                          instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_TU",
@@ -234,7 +234,7 @@
     defvar instruction = !cast<Instruction>(instruction_name#"_"#kind#"_"#vti.LMul.MX);
     let Predicates = GetVTypePredicates<vti>.Predicates in
     def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
-                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
+                               (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
               (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
   }
 }
@@ -250,9 +250,9 @@
     defvar instruction = !cast<Instruction>(instruction_name#"_"#kind#"_"#vti.LMul.MX);
     let Predicates = GetVTypePredicates<vti>.Predicates in {
     def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
-                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
+                               (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
               (instruction vti.RegClass:$rs1, xop_kind:$rs2,
                            vti.AVL, vti.Log2SEW)>;
-    def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind xop_kind:$rs2)),
+    def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))),
                                (vti.Vector vti.RegClass:$rs1), invcc)),
               (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
   }
@@ -408,7 +408,7 @@
               (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                  vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
     def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
-                  (wti.Vector (extop2 (vti.Vector (SplatPat GPR:$rs1))))),
+                  (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
              (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>;
   }
@@ -428,7 +428,7 @@
                 wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                 TAIL_AGNOSTIC)>;
     def : Pat<(op (wti.Vector wti.RegClass:$rs2),
-                  (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1))))),
+                  (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
              (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                 wti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>;
   }
@@ -464,7 +464,7 @@
                                  GetVTypePredicates<wti>.Predicates) in
     def : Pat<
       (add (wti.Vector wti.RegClass:$rd),
-           (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat GPR:$rs1)))),
+           (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat (XLenVT GPR:$rs1))))),
                        (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
       (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
          wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2,
@@ -713,7 +713,7 @@
 // pattern operands
 foreach vti = AllIntegerVectors in {
   let Predicates = GetVTypePredicates<vti>.Predicates in {
-    def : Pat<(sub (vti.Vector (SplatPat GPR:$rs2)),
+    def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                    (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW)>;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1205,7 +1205,7 @@
     def : Pat<
       (vti.Vector
        (riscv_trunc_vector_vl
        (op (wti.Vector wti.RegClass:$rs2),
-            (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1))))),
+            (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
        (vti.Mask true_mask),
        VLOpFrag)),
      (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
@@ -1337,7 +1337,7 @@
    def : Pat<
      (vti.Vector
       (riscv_trunc_vector_vl
       (op (wti.Vector wti.RegClass:$rs2),
-           (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1)),
+           (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))),
                               (vti.Mask true_mask), VLOpFrag)),
       srcvalue, (wti.Mask true_mask), VLOpFrag),
      (vti.Mask true_mask), VLOpFrag)),
@@ -2309,7 +2309,7 @@
                                              (vti.Scalar (fpimm0)),
                                              VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
-                 vti.RegClass:$merge, X0, GPR:$vl, vti.Log2SEW)>;
+                 vti.RegClass:$merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
   def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                            (vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
                                            VLOpFrag)),

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
@@ -519,7 +519,7 @@
 // Pseudo-instructions and codegen patterns
 //===----------------------------------------------------------------------===//

 let Predicates = [HasVendorXTHeadBa] in {
-def : Pat<(add GPR:$rs1, (shl GPR:$rs2, uimm2:$uimm2)),
+def : Pat<(add (XLenVT GPR:$rs1), (shl GPR:$rs2, uimm2:$uimm2)),
           (TH_ADDSL GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;

 // Reuse complex patterns from StdExtZba
@@ -549,18 +549,18 @@
 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
           (TH_ADDSL GPR:$rs2, (TH_ADDSL GPR:$rs1, GPR:$rs1, 3), 3)>;

-def : Pat<(add GPR:$r, CSImm12MulBy4:$i),
-          (TH_ADDSL GPR:$r, (ADDI X0, (SimmShiftRightBy2XForm CSImm12MulBy4:$i)), 2)>;
-def : Pat<(add GPR:$r, CSImm12MulBy8:$i),
-          (TH_ADDSL GPR:$r, (ADDI X0, (SimmShiftRightBy3XForm CSImm12MulBy8:$i)), 3)>;
+def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy4:$i),
+          (TH_ADDSL GPR:$r, (ADDI (XLenVT X0), (SimmShiftRightBy2XForm CSImm12MulBy4:$i)), 2)>;
+def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy8:$i),
+          (TH_ADDSL GPR:$r, (ADDI (XLenVT X0), (SimmShiftRightBy3XForm CSImm12MulBy8:$i)), 3)>;

-def : Pat<(mul GPR:$r, C3LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C3LeftShift:$i),
           (SLLI (TH_ADDSL GPR:$r, GPR:$r, 1),
                 (TrailingZeros C3LeftShift:$i))>;
-def : Pat<(mul GPR:$r, C5LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C5LeftShift:$i),
           (SLLI (TH_ADDSL GPR:$r, GPR:$r, 2),
                 (TrailingZeros C5LeftShift:$i))>;
-def : Pat<(mul GPR:$r, C9LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C9LeftShift:$i),
           (SLLI (TH_ADDSL GPR:$r, GPR:$r, 3),
                 (TrailingZeros C9LeftShift:$i))>;
@@ -596,14 +596,14 @@
 def : PatGprImm<rotr, TH_SRRI, uimmlog2xlen>;
 // There's no encoding for a rotate-left-immediate in X-THead-Bb, as
 // it can be implemented with th.srri by negating the immediate.
-def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(rotl (XLenVT GPR:$rs1), uimmlog2xlen:$shamt),
           (TH_SRRI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
-def : Pat<(sext_inreg GPR:$rs1, i32), (TH_EXT GPR:$rs1, 31, 0)>;
-def : Pat<(sext_inreg GPR:$rs1, i16), (TH_EXT GPR:$rs1, 15, 0)>;
-def : Pat<(sext_inreg GPR:$rs1, i8), (TH_EXT GPR:$rs1, 7, 0)>;
-def : Pat<(sext_inreg GPR:$rs1, i1), (TH_EXT GPR:$rs1, 0, 0)>;
+def : Pat<(sext_inreg (XLenVT GPR:$rs1), i32), (TH_EXT GPR:$rs1, 31, 0)>;
+def : Pat<(sext_inreg (XLenVT GPR:$rs1), i16), (TH_EXT GPR:$rs1, 15, 0)>;
+def : Pat<(sext_inreg (XLenVT GPR:$rs1), i8), (TH_EXT GPR:$rs1, 7, 0)>;
+def : Pat<(sext_inreg (XLenVT GPR:$rs1), i1), (TH_EXT GPR:$rs1, 0, 0)>;
 def : PatGpr<ctlz, TH_FF1>;
-def : Pat<(ctlz (xor GPR:$rs1, -1)), (TH_FF0 GPR:$rs1)>;
+def : Pat<(XLenVT (ctlz (xor (XLenVT GPR:$rs1), -1))), (TH_FF0 GPR:$rs1)>;
 def : PatGpr<bswap, TH_REV>;
 } // Predicates = [HasVendorXTHeadBb]
@@ -620,37 +620,39 @@
 } // Predicates = [HasVendorXTHeadBb, IsRV64]

 let Predicates = [HasVendorXTHeadBs] in {
-def : Pat<(and (srl GPR:$rs1, uimmlog2xlen:$shamt), 1),
+def : Pat<(and (srl (XLenVT GPR:$rs1), uimmlog2xlen:$shamt), 1),
           (TH_TST GPR:$rs1, uimmlog2xlen:$shamt)>;
-def : Pat<(seteq (and GPR:$rs1, SingleBitSetMask:$mask), 0),
+def : Pat<(XLenVT (seteq (and (XLenVT GPR:$rs1), SingleBitSetMask:$mask), 0)),
           (TH_TST (XORI GPR:$rs1, -1), SingleBitSetMask:$mask)>;
 } // Predicates = [HasVendorXTHeadBs]

 let Predicates = [HasVendorXTHeadCondMov] in {
-def : Pat<(select GPR:$cond, GPR:$a, GPR:$b),
+def : Pat<(select (XLenVT GPR:$cond), (XLenVT GPR:$a), (XLenVT GPR:$b)),
           (TH_MVEQZ GPR:$a, GPR:$b, GPR:$cond)>;
-def : Pat<(select GPR:$cond, GPR:$a, (XLenVT 0)),
-          (TH_MVEQZ GPR:$a, X0, GPR:$cond)>;
-def : Pat<(select GPR:$cond, (XLenVT 0), GPR:$b),
-          (TH_MVNEZ GPR:$b, X0, GPR:$cond)>;
+def : Pat<(select (XLenVT GPR:$cond), (XLenVT GPR:$a), (XLenVT 0)),
+          (TH_MVEQZ GPR:$a, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (XLenVT GPR:$cond), (XLenVT 0), (XLenVT GPR:$b)),
+          (TH_MVNEZ GPR:$b, (XLenVT X0), GPR:$cond)>;

-def : Pat<(select (riscv_seteq GPR:$cond), GPR:$a, GPR:$b),
+def : Pat<(select (riscv_seteq (XLenVT GPR:$cond)), (XLenVT GPR:$a), (XLenVT GPR:$b)),
           (TH_MVNEZ GPR:$a, GPR:$b, GPR:$cond)>;
-def : Pat<(select (riscv_setne GPR:$cond), GPR:$a, GPR:$b),
+def : Pat<(select (riscv_setne (XLenVT GPR:$cond)), (XLenVT GPR:$a), (XLenVT GPR:$b)),
           (TH_MVEQZ GPR:$a, GPR:$b, GPR:$cond)>;
-def : Pat<(select (riscv_seteq GPR:$cond), GPR:$a, (XLenVT 0)),
-          (TH_MVNEZ GPR:$a, X0, GPR:$cond)>;
-def : Pat<(select (riscv_setne GPR:$cond), GPR:$a, (XLenVT 0)),
-          (TH_MVEQZ GPR:$a, X0, GPR:$cond)>;
-def : Pat<(select (riscv_seteq GPR:$cond), (XLenVT 0), GPR:$b),
-          (TH_MVEQZ GPR:$b, X0, GPR:$cond)>;
-def : Pat<(select (riscv_setne GPR:$cond), (XLenVT 0), GPR:$b),
-          (TH_MVNEZ GPR:$b, X0, GPR:$cond)>;
+def : Pat<(select (riscv_seteq (XLenVT GPR:$cond)), (XLenVT GPR:$a), (XLenVT 0)),
+          (TH_MVNEZ GPR:$a, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (riscv_setne (XLenVT GPR:$cond)), (XLenVT GPR:$a), (XLenVT 0)),
+          (TH_MVEQZ GPR:$a, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (riscv_seteq (XLenVT GPR:$cond)), (XLenVT 0), (XLenVT GPR:$b)),
+          (TH_MVEQZ GPR:$b, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (riscv_setne (XLenVT GPR:$cond)), (XLenVT 0), (XLenVT GPR:$b)),
+          (TH_MVNEZ GPR:$b, (XLenVT X0), GPR:$cond)>;
 } // Predicates = [HasVendorXTHeadCondMov]

 let Predicates = [HasVendorXTHeadMac] in {
-def : Pat<(add GPR:$rd, (mul GPR:$rs1, GPR:$rs2)), (TH_MULA GPR:$rd, GPR:$rs1, GPR:$rs2)>;
-def : Pat<(sub GPR:$rd, (mul GPR:$rs1, GPR:$rs2)), (TH_MULS GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+def : Pat<(add GPR:$rd, (mul (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (TH_MULA GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sub GPR:$rd, (mul (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (TH_MULS GPR:$rd, GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasVendorXTHeadMac]

 let Predicates = [HasVendorXTHeadMac, IsRV64] in {
@@ -661,19 +663,21 @@
           (TH_MULSW GPR:$rd, GPR:$rs1, GPR:$rs2)>;

 // mulah, mulsh produce a sign-extended result.
 def : Pat<(binop_allwusers<add> GPR:$rd, (mul
-                                          (sexti16 GPR:$rs1),
-                                          (sexti16 GPR:$rs2))),
+                                          (sexti16 (i64 GPR:$rs1)),
+                                          (sexti16 (i64 GPR:$rs2)))),
           (TH_MULAH GPR:$rd, GPR:$rs1, GPR:$rs2)>;
 def : Pat<(binop_allwusers<sub> GPR:$rd, (mul
-                                          (sexti16 GPR:$rs1),
-                                          (sexti16 GPR:$rs2))),
+                                          (sexti16 (i64 GPR:$rs1)),
+                                          (sexti16 (i64 GPR:$rs2)))),
           (TH_MULSH GPR:$rd, GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasVendorXTHeadMac, IsRV64]

 let Predicates = [HasVendorXTHeadMac, IsRV32] in {
-def : Pat<(i32 (add GPR:$rd, (mul (sexti16 GPR:$rs1), (sexti16 GPR:$rs2)))),
+def : Pat<(i32 (add GPR:$rd, (mul (sexti16 (i32 GPR:$rs1)),
+                                  (sexti16 (i32 GPR:$rs2))))),
           (TH_MULAH GPR:$rd, GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i32 (sub GPR:$rd, (mul (sexti16 GPR:$rs1), (sexti16 GPR:$rs2)))),
+def : Pat<(i32 (sub GPR:$rd, (mul (sexti16 (i32 GPR:$rs1)),
+                                  (sexti16 (i32 GPR:$rs2))))),
           (TH_MULSH GPR:$rd, GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasVendorXTHeadMac, IsRV32]
@@ -760,26 +764,26 @@
                            [], [], 10>;

 multiclass LdIdxPat<PatFrag LoadOp, RVInst Inst, ValueType vt = XLenVT> {
-def : Pat<(vt (LoadOp (AddrRegRegScale GPR:$rs1, GPR:$rs2, uimm2:$uimm2))),
+def : Pat<(vt (LoadOp (AddrRegRegScale (XLenVT GPR:$rs1), (XLenVT GPR:$rs2), uimm2:$uimm2))),
           (Inst GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;
 }

 multiclass LdZextIdxPat<PatFrag LoadOp, RVInst Inst, ValueType vt = i64> {
-def : Pat<(vt (LoadOp (AddrRegZextRegScale (i64 GPR:$rs1), GPR:$rs2, uimm2:$uimm2))),
+def : Pat<(vt (LoadOp (AddrRegZextRegScale (i64 GPR:$rs1), (i64 GPR:$rs2), uimm2:$uimm2))),
           (Inst GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;
 }

 multiclass StIdxPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                     ValueType vt = XLenVT> {
 def : Pat<(StoreOp (vt StTy:$rd),
-                   (AddrRegRegScale GPR:$rs1, GPR:$rs2, uimm2:$uimm2)),
+                   (AddrRegRegScale (XLenVT GPR:$rs1), (XLenVT GPR:$rs2), uimm2:$uimm2)),
           (Inst StTy:$rd, GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;
 }

 multiclass StZextIdxPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                         ValueType vt = i64> {
 def : Pat<(StoreOp (vt StTy:$rd),
-                   (AddrRegZextRegScale (i64 GPR:$rs1), GPR:$rs2, uimm2:$uimm2)),
+                   (AddrRegZextRegScale (i64 GPR:$rs1), (i64 GPR:$rs2), uimm2:$uimm2)),
           (Inst StTy:$rd, GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;
 }

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
@@ -30,33 +30,33 @@
 let Predicates = [IsRV64, HasVendorXVentanaCondOps] in {
 // Directly use MASKC/MASKCN in case of any of the operands being 0.
-def : Pat<(select GPR:$rc, GPR:$rs1, (i64 0)),
+def : Pat<(select (i64 GPR:$rc), GPR:$rs1, (i64 0)),
           (VT_MASKC GPR:$rs1, GPR:$rc)>;
-def : Pat<(select GPR:$rc, (i64 0), GPR:$rs1),
+def : Pat<(select (i64 GPR:$rc), (i64 0), GPR:$rs1),
           (VT_MASKCN GPR:$rs1, GPR:$rc)>;

-def : Pat<(select (riscv_setne GPR:$rc), GPR:$rs1, (i64 0)),
+def : Pat<(select (riscv_setne (i64 GPR:$rc)), GPR:$rs1, (i64 0)),
           (VT_MASKC GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq GPR:$rc), GPR:$rs1, (i64 0)),
+def : Pat<(select (riscv_seteq (i64 GPR:$rc)), GPR:$rs1, (i64 0)),
           (VT_MASKCN GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_setne GPR:$rc), (i64 0), GPR:$rs1),
+def : Pat<(select (riscv_setne (i64 GPR:$rc)), (i64 0), GPR:$rs1),
           (VT_MASKCN GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq GPR:$rc), (i64 0), GPR:$rs1),
+def : Pat<(select (riscv_seteq (i64 GPR:$rc)), (i64 0), GPR:$rs1),
           (VT_MASKC GPR:$rs1, GPR:$rc)>;

 // Conditional AND operation patterns.
-def : Pat<(i64 (select GPR:$rc, (and GPR:$rs1, GPR:$rs2), GPR:$rs1)),
+def : Pat<(i64 (select (i64 GPR:$rc), (and GPR:$rs1, GPR:$rs2), GPR:$rs1)),
           (OR (AND $rs1, $rs2), (VT_MASKCN $rs1, $rc))>;
-def : Pat<(i64 (select GPR:$rc, GPR:$rs1, (and GPR:$rs1, GPR:$rs2))),
+def : Pat<(i64 (select (i64 GPR:$rc), GPR:$rs1, (and GPR:$rs1, GPR:$rs2))),
           (OR (AND $rs1, $rs2), (VT_MASKC $rs1, $rc))>;

 // Basic select pattern that selects between 2 registers.
-def : Pat<(i64 (select GPR:$rc, GPR:$rs1, GPR:$rs2)),
+def : Pat<(i64 (select (i64 GPR:$rc), GPR:$rs1, GPR:$rs2)),
           (OR (VT_MASKC $rs1, $rc), (VT_MASKCN $rs2, $rc))>;

-def : Pat<(i64 (select (riscv_setne GPR:$rc), GPR:$rs1, GPR:$rs2)),
+def : Pat<(i64 (select (riscv_setne (i64 GPR:$rc)), GPR:$rs1, GPR:$rs2)),
           (OR (VT_MASKC GPR:$rs1, GPR:$rc), (VT_MASKCN GPR:$rs2, GPR:$rc))>;
-def : Pat<(i64 (select (riscv_seteq GPR:$rc), GPR:$rs2, GPR:$rs1)),
+def : Pat<(i64 (select (riscv_seteq (i64 GPR:$rc)), GPR:$rs2, GPR:$rs1)),
           (OR (VT_MASKC GPR:$rs1, GPR:$rc), (VT_MASKCN GPR:$rs2, GPR:$rc))>;
 } // Predicates = [IsRV64, HasVendorXVentanaCondOps]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -502,9 +502,9 @@
 //===----------------------------------------------------------------------===//

 let Predicates = [HasStdExtZbbOrZbkb] in {
-def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
-def : Pat<(or GPR:$rs1, (not GPR:$rs2)), (ORN GPR:$rs1, GPR:$rs2)>;
-def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (or GPR:$rs1, (not GPR:$rs2))), (ORN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbkb]

 let Predicates = [HasStdExtZbbOrZbkb] in {
@@ -514,7 +514,7 @@
 def : PatGprImm<rotr, RORI, uimmlog2xlen>;
 // There's no encoding for roli in the the 'B' extension as it can be
 // implemented with rori by negating the immediate.
-def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(XLenVT (rotl GPR:$rs1, uimmlog2xlen:$shamt)),
           (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
 } // Predicates = [HasStdExtZbbOrZbkb]
@@ -527,48 +527,49 @@
 } // Predicates = [HasStdExtZbbOrZbkb, IsRV64]

 let Predicates = [HasStdExtZbs] in {
-def : Pat<(and (not (shiftop<shl> 1, GPR:$rs2)), GPR:$rs1),
+def : Pat<(XLenVT (and (not (shiftop<shl> 1, (XLenVT GPR:$rs2))), GPR:$rs1)),
           (BCLR GPR:$rs1, GPR:$rs2)>;
-def : Pat<(and (rotl -2, GPR:$rs2), GPR:$rs1), (BCLR GPR:$rs1, GPR:$rs2)>;
-def : Pat<(or (shiftop<shl> 1, GPR:$rs2), GPR:$rs1),
+def : Pat<(XLenVT (and (rotl -2, (XLenVT GPR:$rs2)), GPR:$rs1)),
+          (BCLR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (or (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
           (BSET GPR:$rs1, GPR:$rs2)>;
-def : Pat<(xor (shiftop<shl> 1, GPR:$rs2), GPR:$rs1),
+def : Pat<(XLenVT (xor (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
           (BINV GPR:$rs1, GPR:$rs2)>;
-def : Pat<(and (shiftop<srl> GPR:$rs1, GPR:$rs2), 1),
+def : Pat<(XLenVT (and (shiftop<srl> GPR:$rs1, (XLenVT GPR:$rs2)), 1)),
           (BEXT GPR:$rs1, GPR:$rs2)>;

-def : Pat<(shiftop<shl> 1, GPR:$rs2),
-          (BSET X0, GPR:$rs2)>;
+def : Pat<(XLenVT (shiftop<shl> 1, (XLenVT GPR:$rs2))),
+          (BSET (XLenVT X0), GPR:$rs2)>;

-def : Pat<(and GPR:$rs1, BCLRMask:$mask),
+def : Pat<(XLenVT (and GPR:$rs1, BCLRMask:$mask)),
           (BCLRI GPR:$rs1, BCLRMask:$mask)>;
-def : Pat<(or GPR:$rs1, SingleBitSetMask:$mask),
+def : Pat<(XLenVT (or GPR:$rs1, SingleBitSetMask:$mask)),
           (BSETI GPR:$rs1, SingleBitSetMask:$mask)>;
-def : Pat<(xor GPR:$rs1, SingleBitSetMask:$mask),
+def : Pat<(XLenVT (xor GPR:$rs1, SingleBitSetMask:$mask)),
           (BINVI GPR:$rs1, SingleBitSetMask:$mask)>;

-def : Pat<(and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1)),
+def : Pat<(XLenVT (and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1))),
           (BEXTI GPR:$rs1, uimmlog2xlen:$shamt)>;

-def : Pat<(seteq (and GPR:$rs1, SingleBitSetMask:$mask), 0),
+def : Pat<(XLenVT (seteq (XLenVT (and GPR:$rs1, SingleBitSetMask:$mask)), 0)),
           (BEXTI (XORI GPR:$rs1, -1), SingleBitSetMask:$mask)>;

-def : Pat<(or GPR:$r, BSETINVTwoBitsMask:$i),
+def : Pat<(XLenVT (or GPR:$r, BSETINVTwoBitsMask:$i)),
           (BSETI (BSETI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i)),
                  (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
-def : Pat<(xor GPR:$r, BSETINVTwoBitsMask:$i),
+def : Pat<(XLenVT (xor GPR:$r, BSETINVTwoBitsMask:$i)),
           (BINVI (BINVI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i)),
                  (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
-def : Pat<(or GPR:$r, BSETINVORIMask:$i),
+def : Pat<(XLenVT (or GPR:$r, BSETINVORIMask:$i)),
           (BSETI (ORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i)),
                  (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
-def : Pat<(xor GPR:$r, BSETINVORIMask:$i),
+def : Pat<(XLenVT (xor GPR:$r, BSETINVORIMask:$i)),
           (BINVI (XORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i)),
                  (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
-def : Pat<(and GPR:$r, BCLRITwoBitsMask:$i),
+def : Pat<(XLenVT (and GPR:$r, BCLRITwoBitsMask:$i)),
           (BCLRI (BCLRI GPR:$r, (BCLRITwoBitsMaskLow BCLRITwoBitsMask:$i)),
                  (BCLRITwoBitsMaskHigh BCLRITwoBitsMask:$i))>;
-def : Pat<(and GPR:$r, BCLRIANDIMask:$i),
+def : Pat<(XLenVT (and GPR:$r, BCLRIANDIMask:$i)),
           (BCLRI (ANDI GPR:$r, (BCLRIANDIMaskLow BCLRIANDIMask:$i)),
                  (BCLRITwoBitsMaskHigh BCLRIANDIMask:$i))>;
 } // Predicates = [HasStdExtZbs]
@@ -597,12 +598,12 @@
 def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>;

 def : Pat<(i64 (riscv_absw GPR:$rs1)),
-          (MAX GPR:$rs1, (SUBW X0, GPR:$rs1))>;
+          (MAX GPR:$rs1, (SUBW (XLenVT X0), GPR:$rs1))>;
 } // Predicates = [HasStdExtZbb, IsRV64]

 let Predicates = [HasStdExtZbb] in {
-def : Pat<(sext_inreg GPR:$rs1, i8), (SEXT_B GPR:$rs1)>;
-def : Pat<(sext_inreg GPR:$rs1, i16), (SEXT_H GPR:$rs1)>;
+def : Pat<(XLenVT (sext_inreg GPR:$rs1, i8)), (SEXT_B GPR:$rs1)>;
+def : Pat<(XLenVT (sext_inreg GPR:$rs1, i16)), (SEXT_H GPR:$rs1)>;
 } // Predicates = [HasStdExtZbb]

 let Predicates = [HasStdExtZbb] in {
@@ -620,29 +621,29 @@
 let Predicates = [HasStdExtZbkb] in {
 def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
-              (zexti8 GPR:$rs1)),
+              (zexti8 (XLenVT GPR:$rs1))),
           (PACKH GPR:$rs1, GPR:$rs2)>;
-def : Pat<(or (shl (zexti8 GPR:$rs2), (XLenVT 8)),
-              (zexti8 GPR:$rs1)),
+def : Pat<(or (shl (zexti8 (XLenVT GPR:$rs2)), (XLenVT 8)),
+              (zexti8 (XLenVT GPR:$rs1))),
           (PACKH GPR:$rs1, GPR:$rs2)>;
 def : Pat<(and (or (shl GPR:$rs2, (XLenVT 8)),
-                   (zexti8 GPR:$rs1)), 0xFFFF),
+                   (zexti8 (XLenVT GPR:$rs1))), 0xFFFF),
           (PACKH GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbkb]

 let Predicates = [HasStdExtZbkb, IsRV32] in
-def : Pat<(i32 (or (zexti16 GPR:$rs1), (shl GPR:$rs2, (i32 16)))),
+def : Pat<(i32 (or (zexti16 (i32 GPR:$rs1)), (shl GPR:$rs2, (i32 16)))),
           (PACK GPR:$rs1, GPR:$rs2)>;

 let Predicates = [HasStdExtZbkb, IsRV64] in {
-def : Pat<(i64 (or (zexti32 GPR:$rs1), (shl GPR:$rs2, (i64 32)))),
+def : Pat<(i64 (or (zexti32 (i64 GPR:$rs1)), (shl GPR:$rs2, (i64 32)))),
           (PACK GPR:$rs1, GPR:$rs2)>;

 def : Pat<(binop_allwusers<or> (shl GPR:$rs2, (i64 16)),
-                               (zexti16 GPR:$rs1)),
+                               (zexti16 (i64 GPR:$rs1))),
           (PACKW GPR:$rs1, GPR:$rs2)>;
 def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
-                   (zexti16 GPR:$rs1))),
+                   (zexti16 (i64 GPR:$rs1)))),
           (PACKW GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbkb, IsRV64]
@@ -686,20 +687,20 @@
 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
           (SH3ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;

-def : Pat<(add GPR:$r, CSImm12MulBy4:$i),
-          (SH2ADD (ADDI X0, (SimmShiftRightBy2XForm CSImm12MulBy4:$i)),
+def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy4:$i),
+          (SH2ADD (ADDI (XLenVT X0), (SimmShiftRightBy2XForm CSImm12MulBy4:$i)),
                   GPR:$r)>;
-def : Pat<(add GPR:$r, CSImm12MulBy8:$i),
-          (SH3ADD (ADDI X0, (SimmShiftRightBy3XForm CSImm12MulBy8:$i)),
+def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy8:$i),
+          (SH3ADD (ADDI (XLenVT X0), (SimmShiftRightBy3XForm CSImm12MulBy8:$i)),
                   GPR:$r)>;

-def : Pat<(mul GPR:$r, C3LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C3LeftShift:$i),
           (SLLI (SH1ADD GPR:$r, GPR:$r),
                 (TrailingZeros C3LeftShift:$i))>;
-def : Pat<(mul GPR:$r, C5LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C5LeftShift:$i),
           (SLLI (SH2ADD GPR:$r, GPR:$r),
                 (TrailingZeros C5LeftShift:$i))>;
-def : Pat<(mul GPR:$r, C9LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C9LeftShift:$i),
           (SLLI (SH3ADD GPR:$r, GPR:$r),
                 (TrailingZeros C9LeftShift:$i))>;
@@ -738,7 +739,7 @@
 def : Pat<(i64 (add (and GPR:$rs1, 0xFFFFFFFF), non_imm12:$rs2)),
           (ADD_UW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, X0)>;
+def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, (XLenVT X0))>;

 def : Pat<(i64 (add (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 1)), non_imm12:$rs2)),
           (SH1ADD_UW GPR:$rs1, GPR:$rs2)>;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td
@@ -212,10 +212,10 @@
 def: Pat<(any_fceil FPR32:$rs1), (FROUND_S FPR32:$rs1, FRM_RUP)>;
 def: Pat<(any_ftrunc FPR32:$rs1), (FROUND_S FPR32:$rs1, FRM_RTZ)>;

-def: PatSetCC<FPR32, strict_fsetcc, SETLT, FLTQ_S>;
-def: PatSetCC<FPR32, strict_fsetcc, SETOLT, FLTQ_S>;
-def: PatSetCC<FPR32, strict_fsetcc, SETLE, FLEQ_S>;
-def: PatSetCC<FPR32, strict_fsetcc, SETOLE, FLEQ_S>;
+def: PatSetCC<FPR32, strict_fsetcc, SETLT, FLTQ_S, f32>;
+def: PatSetCC<FPR32, strict_fsetcc, SETOLT, FLTQ_S, f32>;
+def: PatSetCC<FPR32, strict_fsetcc, SETLE, FLEQ_S, f32>;
+def: PatSetCC<FPR32, strict_fsetcc, SETOLE, FLEQ_S, f32>;
 } // Predicates = [HasStdExtZfa]

 let Predicates = [HasStdExtZfa, HasStdExtD] in {
@@ -235,10 +235,10 @@
 def: Pat<(any_fceil FPR64:$rs1), (FROUND_D FPR64:$rs1, FRM_RUP)>;
 def: Pat<(any_ftrunc FPR64:$rs1), (FROUND_D FPR64:$rs1, FRM_RTZ)>;

-def: PatSetCC<FPR64, strict_fsetcc, SETLT, FLTQ_D>;
-def: PatSetCC<FPR64, strict_fsetcc, SETOLT, FLTQ_D>;
-def: PatSetCC<FPR64, strict_fsetcc, SETLE, FLEQ_D>;
-def: PatSetCC<FPR64, strict_fsetcc, SETOLE, FLEQ_D>;
+def: PatSetCC<FPR64, strict_fsetcc, SETLT, FLTQ_D, f64>;
+def: PatSetCC<FPR64, strict_fsetcc, SETOLT, FLTQ_D, f64>;
+def: PatSetCC<FPR64, strict_fsetcc, SETLE, FLEQ_D, f64>;
+def: PatSetCC<FPR64, strict_fsetcc, SETOLE, FLEQ_D, f64>;
 } // Predicates = [HasStdExtZfa, HasStdExtD]

 let Predicates = [HasStdExtZfa, HasStdExtD, IsRV32] in {
@@ -263,8 +263,8 @@
 def: Pat<(any_fceil FPR16:$rs1), (FROUND_H FPR16:$rs1, FRM_RUP)>;
 def: Pat<(any_ftrunc FPR16:$rs1), (FROUND_H FPR16:$rs1, FRM_RTZ)>;

-def: PatSetCC<FPR16, strict_fsetcc, SETLT, FLTQ_H>;
-def: PatSetCC<FPR16, strict_fsetcc, SETOLT, FLTQ_H>;
-def: PatSetCC<FPR16, strict_fsetcc, SETLE, FLEQ_H>;
-def: PatSetCC<FPR16, strict_fsetcc, SETOLE, FLEQ_H>;
+def: PatSetCC<FPR16, strict_fsetcc, SETLT, FLTQ_H, f16>;
+def: PatSetCC<FPR16, strict_fsetcc, SETOLT, FLTQ_H, f16>;
+def: PatSetCC<FPR16, strict_fsetcc, SETLE, FLEQ_H, f16>;
+def: PatSetCC<FPR16, strict_fsetcc, SETOLE, FLEQ_H, f16>;
 } // Predicates = [HasStdExtZfa, HasStdExtZfh]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -356,59 +356,59 @@
 // Match non-signaling FEQ_D
 foreach Ext = ZfhExts in {
-  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_H,  Ext>;
-  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_H,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLT,  FLT_H,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLT, FLT_H,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLE,  FLE_H,  Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLE, FLE_H,  Ext>;
+  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_H,  Ext, f16>;
+  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_H,  Ext, f16>;
+  defm : PatSetCC_m<strict_fsetcc, SETLT,  FLT_H,  Ext, f16>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLT, FLT_H,  Ext, f16>;
+  defm : PatSetCC_m<strict_fsetcc, SETLE,  FLE_H,  Ext, f16>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLE, FLE_H,  Ext, f16>;
 }

 let Predicates = [HasStdExtZfh] in {
 // Match signaling FEQ_H
-def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETEQ)),
           (AND (FLE_H $rs1, $rs2),
                (FLE_H $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETOEQ)),
           (AND (FLE_H $rs1, $rs2),
                (FLE_H $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETEQ)),
           (FLE_H $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETOEQ)),
           (FLE_H $rs1, $rs1)>;
 } // Predicates = [HasStdExtZfh]

 let Predicates = [HasStdExtZhinx] in {
 // Match signaling FEQ_H
-def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETEQ)),
           (AND (FLE_H_INX $rs1, $rs2),
                (FLE_H_INX $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETOEQ)),
           (AND (FLE_H_INX $rs1, $rs2),
                (FLE_H_INX $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETEQ)),
           (FLE_H_INX $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETOEQ)),
           (FLE_H_INX $rs1, $rs1)>;
 } // Predicates = [HasStdExtZhinx]

 foreach Ext = ZfhExts in {
-  defm : PatSetCC_m<strict_fsetccs, SETLT,  FLT_H, Ext>;
-  defm : PatSetCC_m<strict_fsetccs, SETOLT, FLT_H, Ext>;
-  defm : PatSetCC_m<strict_fsetccs, SETLE,  FLE_H, Ext>;
-  defm : PatSetCC_m<strict_fsetccs, SETOLE, FLE_H, Ext>;
+  defm : PatSetCC_m<strict_fsetccs, SETLT,  FLT_H, Ext, f16>;
+  defm : PatSetCC_m<strict_fsetccs, SETOLT, FLT_H, Ext, f16>;
+  defm : PatSetCC_m<strict_fsetccs, SETLE,  FLE_H, Ext, f16>;
+  defm : PatSetCC_m<strict_fsetccs, SETOLE, FLE_H, Ext, f16>;
 }

 let Predicates = [HasStdExtZfh] in {
-defm Select_FPR16 : SelectCC_GPR_rrirr<FPR16>;
+defm Select_FPR16 : SelectCC_GPR_rrirr<FPR16, f16>;

 def PseudoFROUND_H : PseudoFROUND<FPR16>;
 } // Predicates = [HasStdExtZfh]

 let Predicates = [HasStdExtZhinx] in {
-defm Select_FPR16INX : SelectCC_GPR_rrirr<FPR16INX>;
+defm Select_FPR16INX : SelectCC_GPR_rrirr<FPR16INX, f16>;

 def PseudoFROUND_H_INX : PseudoFROUND<FPR16INX>;
 } // Predicates = [HasStdExtZhinx]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
@@ -30,32 +30,32 @@
 let Predicates = [HasStdExtZicond] in {
 // Directly use CZERO_EQZ/CZERO_NEZ in case of any of the operands being 0.
-def : Pat<(select GPR:$rc, GPR:$rs1, 0),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), GPR:$rs1, 0)),
           (CZERO_EQZ GPR:$rs1, GPR:$rc)>;
-def : Pat<(select GPR:$rc, 0, GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), 0, GPR:$rs1)),
           (CZERO_NEZ GPR:$rs1, GPR:$rc)>;

-def : Pat<(select (riscv_setne GPR:$rc), GPR:$rs1, 0),
+def : Pat<(XLenVT (select (riscv_setne (XLenVT GPR:$rc)), GPR:$rs1, 0)),
           (CZERO_EQZ GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq GPR:$rc), GPR:$rs1, 0),
+def : Pat<(XLenVT (select (riscv_seteq (XLenVT GPR:$rc)), GPR:$rs1, 0)),
           (CZERO_NEZ GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_setne GPR:$rc), 0, GPR:$rs1),
+def : Pat<(XLenVT (select (riscv_setne (XLenVT GPR:$rc)), 0, GPR:$rs1)),
           (CZERO_NEZ GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq GPR:$rc), 0, GPR:$rs1),
+def : Pat<(XLenVT (select (riscv_seteq (XLenVT GPR:$rc)), 0, GPR:$rs1)),
           (CZERO_EQZ GPR:$rs1, GPR:$rc)>;

 // Conditional AND operation patterns.
-def : Pat<(select GPR:$rc, (and GPR:$rs1, GPR:$rs2), GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), (and GPR:$rs1, GPR:$rs2), GPR:$rs1)),
           (OR (AND $rs1, $rs2), (CZERO_NEZ $rs1, $rc))>;
-def : Pat<(select GPR:$rc, GPR:$rs1, (and GPR:$rs1, GPR:$rs2)),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), GPR:$rs1, (and GPR:$rs1, GPR:$rs2))),
           (OR (AND $rs1, $rs2), (CZERO_EQZ $rs1, $rc))>;

 // Basic select pattern that selects between 2 registers.
-def : Pat<(select GPR:$rc, GPR:$rs1, GPR:$rs2),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), GPR:$rs1, GPR:$rs2)),
           (OR (CZERO_EQZ $rs1, $rc), (CZERO_NEZ $rs2, $rc))>;

-def : Pat<(select (riscv_setne GPR:$rc), GPR:$rs1, GPR:$rs2),
+def : Pat<(XLenVT (select (riscv_setne (XLenVT GPR:$rc)), GPR:$rs1, GPR:$rs2)),
           (OR (CZERO_EQZ GPR:$rs1, GPR:$rc), (CZERO_NEZ GPR:$rs2, GPR:$rc))>;
-def : Pat<(select (riscv_seteq GPR:$rc), GPR:$rs2, GPR:$rs1),
+def : Pat<(XLenVT (select (riscv_seteq (XLenVT GPR:$rc)), GPR:$rs2, GPR:$rs1)),
           (OR (CZERO_EQZ GPR:$rs1, GPR:$rc), (CZERO_NEZ GPR:$rs2, GPR:$rc))>;
 } // Predicates = [HasStdExtZicond]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
@@ -134,7 +134,7 @@
 //===----------------------------------------------------------------------===//

 class PatGprGprByteSelect<SDPatternOperator OpNode, RVInst Inst>
-    : Pat<(OpNode GPR:$rs1, GPR:$rs2, byteselect:$imm),
+    : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1), (XLenVT GPR:$rs2), byteselect:$imm)),
           (Inst GPR:$rs1, GPR:$rs2, byteselect:$imm)>;

 // Zknd

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -115,13 +115,16 @@
 def XLenVT : ValueTypeByHwMode<[RV32, RV64],
                                [i32, i64]>;
+// Floating point class with XLen bits.
+def XLenFVT : ValueTypeByHwMode<[RV32, RV64],
+                                [f32, f64]>;
 def XLenRI : RegInfoByHwMode<
       [RV32, RV64],
       [RegInfo<32,32,32>, RegInfo<64,64,64>]>;

 // The order of registers represents the preferred allocation sequence.
 // Registers are listed in the order caller-save, callee-save, specials.
-def GPR : RegisterClass<"RISCV", [XLenVT], 32, (add
+def GPR : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add
     (sequence "X%u", 10, 17),
     (sequence "X%u", 5, 7),
     (sequence "X%u", 28, 31),
@@ -132,15 +135,15 @@
   let RegInfos = XLenRI;
 }

-def GPRX0 : RegisterClass<"RISCV", [XLenVT], 32, (add X0)> {
+def GPRX0 : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add X0)> {
   let RegInfos = XLenRI;
 }

-def GPRNoX0 : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, X0)> {
+def GPRNoX0 : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (sub GPR, X0)> {
   let RegInfos = XLenRI;
 }

-def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, X0, X2)> {
+def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (sub GPR, X0, X2)> {
   let RegInfos = XLenRI;
 }

@@ -148,11 +151,11 @@
 // stack on some microarchitectures. Also remove the reserved registers X0, X2,
 // X3, and X4 as it reduces the number of register classes that get synthesized
 // by tablegen.
-def GPRJALR : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, (sequence "X%u", 0, 5))> {
+def GPRJALR : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (sub GPR, (sequence "X%u", 0, 5))> {
   let RegInfos = XLenRI;
 }

-def GPRC : RegisterClass<"RISCV", [XLenVT], 32, (add
+def GPRC : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add
     (sequence "X%u", 10, 15),
     (sequence "X%u", 8, 9)
   )> {
@@ -163,7 +166,7 @@
 // restored to the saved value before the tail call, which would clobber a call
 // address. We shouldn't use x5 since that is a hint for to pop the return
 // address stack on some microarchitectures.
-def GPRTC : RegisterClass<"RISCV", [XLenVT], 32, (add
+def GPRTC : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add
     (sequence "X%u", 6, 7),
     (sequence "X%u", 10, 17),
     (sequence "X%u", 28, 31)
@@ -171,12 +174,12 @@
   let RegInfos = XLenRI;
 }

-def SP : RegisterClass<"RISCV", [XLenVT], 32, (add X2)> {
+def SP : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add X2)> {
   let RegInfos = XLenRI;
 }

 // Saved Registers from s0 to s7, for C.MVA01S07 instruction in Zcmp extension
-def SR07 : RegisterClass<"RISCV", [XLenVT], 32, (add
+def SR07 : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add
     (sequence "X%u", 8, 9),
     (sequence "X%u", 18, 23)
   )> {
@@ -542,7 +545,6 @@
 let RegInfos = XLenRI in {
 def GPRF16 : RegisterClass<"RISCV", [f16], 16, (add GPR)>;
 def GPRF32 : RegisterClass<"RISCV", [f32], 32, (add GPR)>;
-def GPRF64 : RegisterClass<"RISCV", [f64], 64, (add GPR)>;
 } // RegInfos = XLenRI

 let RegAltNameIndices = [ABIRegAltName] in {