diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -965,93 +965,29 @@ } unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { + if (MI.isMetaInstruction()) + return 0; + unsigned Opcode = MI.getOpcode(); - switch (Opcode) { - default: { - if (MI.getParent() && MI.getParent()->getParent()) { - const auto MF = MI.getMF(); - const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget()); - const MCRegisterInfo &MRI = *TM.getMCRegisterInfo(); - const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo(); - const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>(); - if (isCompressibleInst(MI, &ST, MRI, STI)) - return 2; - } - return get(Opcode).getSize(); - } - case TargetOpcode::EH_LABEL: - case TargetOpcode::IMPLICIT_DEF: - case TargetOpcode::KILL: - case TargetOpcode::DBG_VALUE: - return 0; - // These values are determined based on RISCVExpandAtomicPseudoInsts, - // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the - // pseudos are expanded. 
- case RISCV::PseudoCALLReg: - case RISCV::PseudoCALL: - case RISCV::PseudoJump: - case RISCV::PseudoTAIL: - case RISCV::PseudoLLA: - case RISCV::PseudoLA: - case RISCV::PseudoLA_TLS_IE: - case RISCV::PseudoLA_TLS_GD: - return 8; - case RISCV::PseudoAtomicLoadNand32: - case RISCV::PseudoAtomicLoadNand64: - return 20; - case RISCV::PseudoMaskedAtomicSwap32: - case RISCV::PseudoMaskedAtomicLoadAdd32: - case RISCV::PseudoMaskedAtomicLoadSub32: - return 28; - case RISCV::PseudoMaskedAtomicLoadNand32: - return 32; - case RISCV::PseudoMaskedAtomicLoadMax32: - case RISCV::PseudoMaskedAtomicLoadMin32: - return 44; - case RISCV::PseudoMaskedAtomicLoadUMax32: - case RISCV::PseudoMaskedAtomicLoadUMin32: - return 36; - case RISCV::PseudoCmpXchg32: - case RISCV::PseudoCmpXchg64: - return 16; - case RISCV::PseudoMaskedCmpXchg32: - return 32; - case TargetOpcode::INLINEASM: - case TargetOpcode::INLINEASM_BR: { + if (Opcode == TargetOpcode::INLINEASM || + Opcode == TargetOpcode::INLINEASM_BR) { const MachineFunction &MF = *MI.getParent()->getParent(); const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget()); return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *TM.getMCAsmInfo()); }
depending on NF. - unsigned NF = isRVVSpillForZvlsseg(Opcode)->first; - return 4 * (2 * NF - 1); - } + + if (MI.getParent() && MI.getParent()->getParent()) { + const auto MF = MI.getMF(); + const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget()); + const MCRegisterInfo &MRI = *TM.getMCRegisterInfo(); + const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo(); + const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>(); + if (isCompressibleInst(MI, &ST, MRI, STI)) + return 2; } + return get(Opcode).getSize(); } bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -1183,7 +1183,7 @@ // destination. // Define AsmString to print "call" when compile with -S flag. // Define isCodeGenOnly = 0 to support parsing assembly "call" instruction. -let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, hasSideEffects = 0, +let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, Size = 8, hasSideEffects = 0, mayStore = 0, mayLoad = 0 in def PseudoCALLReg : Pseudo<(outs GPR:$rd), (ins call_symbol:$func), []> { let AsmString = "call\t$rd, $func"; } @@ -1195,7 +1195,7 @@ // if the offset fits in a signed 21-bit immediate. // Define AsmString to print "call" when compile with -S flag. // Define isCodeGenOnly = 0 to support parsing assembly "call" instruction. -let isCall = 1, Defs = [X1], isCodeGenOnly = 0 in +let isCall = 1, Defs = [X1], isCodeGenOnly = 0, Size = 8 in def PseudoCALL : Pseudo<(outs), (ins call_symbol:$func), []> { let AsmString = "call\t$func"; } @@ -1220,7 +1220,7 @@ // expand to auipc and jalr while encoding. // Define AsmString to print "tail" when compile with -S flag. 
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2], - isCodeGenOnly = 0 in + Size = 8, isCodeGenOnly = 0 in def PseudoTAIL : Pseudo<(outs), (ins call_symbol:$dst), []> { let AsmString = "tail\t$dst"; } @@ -1235,28 +1235,28 @@ def : Pat<(riscv_tail (iPTR texternalsym:$dst)), (PseudoTAIL texternalsym:$dst)>; -let isCall = 0, isBarrier = 1, isBranch = 1, isTerminator = 1, +let isCall = 0, isBarrier = 1, isBranch = 1, isTerminator = 1, Size = 8, isCodeGenOnly = 0, hasSideEffects = 0, mayStore = 0, mayLoad = 0 in def PseudoJump : Pseudo<(outs GPR:$rd), (ins pseudo_jump_symbol:$target), []> { let AsmString = "jump\t$target, $rd"; } -let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0, +let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0, isAsmParserOnly = 1 in def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [], "lla", "$dst, $src">; -let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0, +let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0, isAsmParserOnly = 1 in def PseudoLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [], "la", "$dst, $src">; -let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0, +let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0, isAsmParserOnly = 1 in def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [], "la.tls.ie", "$dst, $src">; -let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0, +let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0, isAsmParserOnly = 1 in def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [], "la.tls.gd", "$dst, $src">; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td @@ -188,6 +188,7 @@ let hasSideEffects = 0; } +let Size = 20 in def 
PseudoAtomicLoadNand32 : PseudoAMO; // Ordering constants must be kept in sync with the AtomicOrdering enum in // AtomicOrdering.h. @@ -242,27 +243,35 @@ (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt, timm:$ordering)>; +let Size = 28 in def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO; def : PseudoMaskedAMOPat<int_riscv_masked_atomic_swap_i32, PseudoMaskedAtomicSwap32>; +let Size = 28 in def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO; def : PseudoMaskedAMOPat<int_riscv_masked_atomic_add_i32, PseudoMaskedAtomicLoadAdd32>; +let Size = 28 in def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO; def : PseudoMaskedAMOPat<int_riscv_masked_atomic_sub_i32, PseudoMaskedAtomicLoadSub32>; +let Size = 32 in def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO; def : PseudoMaskedAMOPat<int_riscv_masked_atomic_nand_i32, PseudoMaskedAtomicLoadNand32>; +let Size = 44 in def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax; def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomic_max_i32, PseudoMaskedAtomicLoadMax32>; +let Size = 44 in def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax; def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomic_min_i32, PseudoMaskedAtomicLoadMin32>; +let Size = 36 in def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax; def : PseudoMaskedAMOPat<int_riscv_masked_atomic_umax_i32, PseudoMaskedAtomicLoadUMax32>; +let Size = 36 in def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax; def : PseudoMaskedAMOPat<int_riscv_masked_atomic_umin_i32, PseudoMaskedAtomicLoadUMin32>; @@ -276,6 +285,7 @@ let mayLoad = 1; let mayStore = 1; let hasSideEffects = 0; + let Size = 16; } // Ordering constants must be kept in sync with the AtomicOrdering enum in @@ -304,6 +314,7 @@ let mayLoad = 1; let mayStore = 1; let hasSideEffects = 0; + let Size = 32; } def : Pat<(int_riscv_masked_cmpxchg_i32 @@ -347,6 +358,7 @@ /// 64-bit pseudo AMOs +let Size = 20 in def PseudoAtomicLoadNand64 : PseudoAMO; // Ordering constants must be kept in sync with the AtomicOrdering enum in // AtomicOrdering.h. 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -3899,11 +3899,13 @@ foreach lmul = MxList in { foreach nf = NFSet<lmul>.L in { defvar vreg = SegRegClass<lmul, nf>.RC; - let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in { + let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1, + Size = !mul(4, !sub(!mul(nf, 2), 1)) in { def "PseudoVSPILL" # nf # "_" # lmul.MX : Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2, GPR:$vlenb), []>; } - let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in { + let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1, + Size = !mul(4, !sub(!mul(nf, 2), 1)) in { def "PseudoVRELOAD" # nf # "_" # lmul.MX : Pseudo<(outs vreg:$rs1), (ins GPR:$rs2, GPR:$vlenb), []>; }