Index: lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
===================================================================
--- lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -63,9 +63,13 @@
   bool parseOperand(OperandVector &Operands);
 
+  unsigned checkEarlyTargetMatchPredicate(MCInst &Inst,
+                                          const OperandVector &Operands) override;
+
 public:
   enum RISCVMatchResultTy {
     Match_Dummy = FIRST_TARGET_MATCH_RESULT_TY,
+    Match_RequiresSameSrcAndDst,
 #define GET_OPERAND_DIAGNOSTIC_TYPES
 #include "RISCVGenAsmMatcher.inc"
 #undef GET_OPERAND_DIAGNOSTIC_TYPES
@@ -149,6 +153,17 @@
     return Ret;
   }
 
+  template <int N> bool isImmediate() const {
+    int64_t Imm;
+    RISCVMCExpr::VariantKind VK;
+    if (!isImm())
+      return false;
+    bool IsConstantImm = evaluateConstantImm(Imm, VK);
+    if (!IsConstantImm)
+      return false;
+    return Imm == N;
+  }
+
   // True if operand is a symbol with no modifiers, or a constant with no
   // modifiers and isShiftedInt<N-1, 1>(Op).
   template <int N> bool isBareSimmNLsb0() const {
@@ -492,6 +507,19 @@
     assert(N == 1 && "Invalid number of operands!");
     Inst.addOperand(MCOperand::createImm(getRoundingMode()));
   }
+
+  bool isValidForTie(const RISCVOperand &Other) const {
+    if (Kind != Other.Kind)
+      return false;
+
+    switch (Kind) {
+    default:
+      llvm_unreachable("Unexpected kind");
+      return false;
+    case Register:
+      return getReg() == Other.getReg();
+    }
+  }
 };
 } // end anonymous namespace.
@@ -584,6 +612,8 @@
     Inst.setLoc(IDLoc);
     Out.EmitInstruction(Inst, getSTI());
     return false;
+  case Match_RequiresSameSrcAndDst:
+    return Error(IDLoc, "instruction requires the same source and destination register");
   case Match_MissingFeature:
     return Error(IDLoc, "instruction use requires an option to be enabled");
   case Match_MnemonicFail:
@@ -859,6 +889,51 @@
   return true;
 }
 
+unsigned
+RISCVAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
+                                               const OperandVector &Operands) {
+  StringRef ExpandedOp;
+
+  switch (Inst.getOpcode()) {
+  default:
+    return Match_Success;
+  case RISCV::C_ADD:
+    ExpandedOp = "add"; break;
+  case RISCV::C_ADDW:
+    ExpandedOp = "addw"; break;
+  case RISCV::C_SUB:
+    ExpandedOp = "sub"; break;
+  case RISCV::C_SUBW:
+    ExpandedOp = "subw"; break;
+  case RISCV::C_AND:
+    ExpandedOp = "and"; break;
+  case RISCV::C_OR:
+    ExpandedOp = "or"; break;
+  case RISCV::C_XOR:
+    ExpandedOp = "xor"; break;
+  case RISCV::C_ADDI:
+    ExpandedOp = "addi"; break;
+  case RISCV::C_ANDI:
+    ExpandedOp = "andi"; break;
+  case RISCV::C_SLLI:
+    ExpandedOp = "slli"; break;
+  case RISCV::C_SRLI:
+    ExpandedOp = "srli"; break;
+  case RISCV::C_SRAI:
+    ExpandedOp = "srai"; break;
+  }
+
+  if (static_cast<RISCVOperand &>(*Operands[0]).getToken() != ExpandedOp)
+    return Match_Success;
+
+  // Check for tied src and dst operands.
+  if (static_cast<RISCVOperand &>(*Operands[1])
+          .isValidForTie(static_cast<RISCVOperand &>(*Operands[2])))
+    return Match_Success;
+
+  return Match_RequiresSameSrcAndDst;
+}
+
 bool RISCVAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name, SMLoc NameLoc,
                                       OperandVector &Operands) {
Index: lib/Target/RISCV/RISCVInstrInfoC.td
===================================================================
--- lib/Target/RISCV/RISCVInstrInfoC.td
+++ lib/Target/RISCV/RISCVInstrInfoC.td
@@ -105,6 +105,20 @@
   let DecoderMethod = "decodeSImmOperandAndLsl1<12>";
 }
 
+// Immediate operands with a shared generic asm render method.
+class ImmValAsmOperand<int Value> : AsmOperandClass {
+  let RenderMethod = "addImmOperands";
+  let PredicateMethod = "isImmediate<" # Value # ">";
+  let DiagnosticString = "operand must be immediate value [" # Value # "]";
+}
+
+def ImmZeroAsmOperand : ImmValAsmOperand<0> { let Name = "ImmZero"; }
+def immZero : Operand<XLenVT>, ImmLeaf<XLenVT, [{return Imm == 0;}]> {
+  let ParserMatchClass = ImmZeroAsmOperand;
+}
+
+
 //===----------------------------------------------------------------------===//
 // Instruction Class Templates
 //===----------------------------------------------------------------------===//
@@ -364,7 +378,7 @@
 }
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-def C_JR : RVInst16CR<0b1000, 0b10, (outs), (ins GPRNoX0:$rs1),
+def C_JR : RVInst16CR<0b1000, 0b10, (outs), (ins GPRNoX0:$rs1, immZero:$rs2),
            "c.jr", "$rs1"> {
   let isBranch = 1;
   let isBarrier = 1;
@@ -382,7 +396,7 @@
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCall=1, Defs=[X1],
     rs2 = 0 in
-def C_JALR : RVInst16CR<0b1001, 0b10, (outs), (ins GPRNoX0:$rs1),
+def C_JALR : RVInst16CR<0b1001, 0b10, (outs), (ins GPRNoX0:$rs1, immZero:$rs2),
              "c.jalr", "$rs1">;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
@@ -416,4 +430,64 @@
   let Inst{9-7} = imm{8-6};
 }
 
+def : InstAlias<"add $rs1, $rs1, $rs2", (C_ADD GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+def : InstAlias<"and $rs1, $rs1, $rs2", (C_AND GPRC:$rs1, GPRC:$rs2)>;
+def : InstAlias<"or $rs1, $rs1, $rs2", (C_OR GPRC:$rs1, GPRC:$rs2)>;
+def : InstAlias<"xor $rs1, $rs1, $rs2", (C_XOR GPRC:$rs1, GPRC:$rs2)>;
+def : InstAlias<"sub $rs1, $rs1, $rs2", (C_SUB GPRC:$rs1, GPRC:$rs2)>;
+def : InstAlias<"addi $rs1, $rs1, $imm", (C_ADDI GPRNoX0:$rs1, simm6:$imm)>;
+def : InstAlias<"andi $rs1, $rs1, $imm", (C_ANDI GPRC:$rs1, simm6:$imm)>;
+def : InstAlias<"slli $rs1, $rs1, $imm", (C_SLLI GPRC:$rs1, uimmlog2xlennonzero:$imm)>;
+def : InstAlias<"srli $rs1, $rs1, $imm", (C_SRLI GPRC:$rs1, uimmlog2xlennonzero:$imm)>;
+def : InstAlias<"srai $rs1, $rs1, $imm", (C_SRAI GPRC:$rs1, uimmlog2xlennonzero:$imm)>;
+
+def : InstAlias<"jal x0, $offset", (C_J simm12_lsb0:$offset)>; +def : InstAlias<"jal x1, $offset", (C_JAL simm12_lsb0:$offset)>; +def : InstAlias<"beq, $rs1, x0, $imm", (C_BEQZ GPRC:$rs1, simm9_lsb0:$imm)>; +def : InstAlias<"bne, $rs1, x0, $imm", (C_BNEZ GPRC:$rs1, simm9_lsb0:$imm)>; +def : InstAlias<"addi $rd, x0, $imm", (C_LI GPRNoX0:$rd, simm6:$imm)>; +def : InstAlias<"add $rs1, x0, $rs2", (C_MV GPRNoX0:$rs1, GPRNoX0:$rs2)>; +def : InstAlias<"add $rs1, $rs2, x0", (C_MV GPRNoX0:$rs1, GPRNoX0:$rs2)>; +def : InstAlias<"addi x2, x2, $imm", (C_ADDI16SP X2, simm10_lsb0000:$imm)>; +def : InstAlias<"addi $rd, $rs1, $imm", (C_ADDI4SPN GPRC:$rd, SP:$rs1, uimm10_lsb00nonzero:$imm), 0>; +def : InstAlias<"lui $rd, $imm", (C_LUI GPRNoX0X2:$rd, uimm6nonzero:$imm), 0>; +def : InstAlias<"lw $rd, ${imm}(${rs1})", (C_LW GPRC:$rd, GPRC:$rs1, uimm7_lsb00:$imm), 0>; +def : InstAlias<"lw $rd, ${imm}(${rs1})", (C_LWSP GPRNoX0:$rd, SP:$rs1, uimm8_lsb00:$imm), 0>; +def : InstAlias<"sw $rs2, ${imm}(${rs1})", (C_SW GPRC:$rs2, GPRC:$rs1, uimm7_lsb00:$imm), 0>; +def : InstAlias<"sw $rs2, ${imm}(${rs1})", (C_SWSP GPR:$rs2, SP:$rs1, uimm8_lsb00:$imm), 0>; + +def : InstAlias<"jalr x0, $rs1, $imm", (C_JR GPRNoX0:$rs1, immZero:$imm)>; +def : InstAlias<"jalr x1, $rs1, $imm", (C_JALR GPRNoX0:$rs1, immZero:$imm)>; + + } // Predicates = [HasStdExtC] + +let Predicates = [HasStdExtC, IsRV64] in { +def : InstAlias<"addw $rs1, $rs1, $rs2", (C_ADDW GPRC:$rs1, GPRC:$rs2)>; +def : InstAlias<"subw $rs1, $rs1, $rs2", (C_SUBW GPRC:$rs1, GPRC:$rs2)>; + +def : InstAlias<"addiw $rs1, $rs1, $imm", (C_ADDIW GPRNoX0:$rs1, simm6:$imm)>; + +def : InstAlias<"ld $rd, ${imm}(${rs1})", (C_LD GPRC:$rd, GPRC:$rs1, uimm8_lsb000:$imm), 0>; +def : InstAlias<"ld $rd, ${imm}(${rs1})", (C_LDSP GPRNoX0:$rd, SP:$rs1, uimm9_lsb000:$imm), 0>; +def : InstAlias<"sd $rs2, ${imm}(${rs1})", (C_SD GPRC:$rs2, GPRC:$rs1, uimm8_lsb000:$imm), 0>; +def : InstAlias<"sd $rs2, ${imm}(${rs1})", (C_SDSP GPR:$rs2, SP:$rs1, uimm9_lsb000:$imm), 0>; + 
+
+} // Predicates = [HasStdExtC, IsRV64]
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : InstAlias<"flw $rd, ${imm}(${rs1})", (C_FLW FPR32C:$rd, GPRC:$rs1, uimm7_lsb00:$imm), 0>;
+def : InstAlias<"flw $rd, ${imm}(${rs1})", (C_FLWSP FPR32:$rd, SP:$rs1, uimm8_lsb00:$imm), 0>;
+def : InstAlias<"fsw $rs2, ${imm}(${rs1})", (C_FSW FPR32C:$rs2, GPRC:$rs1, uimm7_lsb00:$imm), 0>;
+def : InstAlias<"fsw $rs2, ${imm}(${rs1})", (C_FSWSP FPR32:$rs2, SP:$rs1, uimm8_lsb00:$imm), 0>;
+}
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : InstAlias<"fld $rd, ${imm}(${rs1})", (C_FLD FPR64C:$rd, GPRC:$rs1, uimm8_lsb000:$imm), 0>;
+def : InstAlias<"fld $rd, ${imm}(${rs1})", (C_FLDSP FPR64:$rd, SP:$rs1, uimm9_lsb000:$imm), 0>;
+def : InstAlias<"fsd $rs2, ${imm}(${rs1})", (C_FSD FPR64C:$rs2, GPRC:$rs1, uimm8_lsb000:$imm), 0>;
+def : InstAlias<"fsd $rs2, ${imm}(${rs1})", (C_FSDSP FPR64:$rs2, SP:$rs1, uimm9_lsb000:$imm), 0>;
+}
+
+// TODO: add the remaining D and Q aliases, plus nop/c.nop.
+
Index: test/MC/RISCV/compressed32-alias.s
===================================================================
--- /dev/null
+++ test/MC/RISCV/compressed32-alias.s
@@ -0,0 +1,97 @@
+# This test checks that we are able to generate compressed
+# instructions from the expanded form defined with InstAlias<>.
+
+#RUN: llvm-mc -triple riscv32 -mattr=+c,+f,+d < %s | FileCheck %s
+
+#Load and Store Instructions
+
+lw x1, 252 (x2)
+#CHECK: c.lwsp ra, 252(sp)
+sw x0, 252 (x2)
+#CHECK: c.swsp zero, 252(sp)
+lw x8, 124 (x15)
+#CHECK: c.lw s0, 124(a5)
+sw x8, 124 (x15)
+#CHECK: c.sw s0, 124(a5)
+
+#Load and Store Instructions 32 bit only
+flw f0, 124(x2)
+#CHECK: c.flwsp ft0, 124(sp)
+fsw f0, 124(x2)
+#CHECK: c.fswsp ft0, 124(sp)
+flw f8, 124(x8)
+#CHECK: c.flw fs0, 124(s0)
+fsw f8, 124(x8)
+#CHECK: c.fsw fs0, 124(s0)
+
+#Load and Store Instructions 32 and 64 bit only
+fld f0, 64 (x2)
+#CHECK: c.fldsp ft0, 64(sp)
+fsd f0, 64 (x2)
+#CHECK: c.fsdsp ft0, 64(sp)
+fld f8, 248(x8)
+#CHECK: c.fld fs0, 248(s0)
+fsd f8, 248(x8)
+#CHECK: c.fsd fs0, 248(s0)
+
+# Control Transfer Instructions
+
+jal x0, -2048
+#CHECK: c.j -2048
+jal x1, 2046
+#CHECK: c.jal 2046
+jalr x0, x1, 0
+#CHECK: c.jr ra
+jalr x1, x8, 0
+#CHECK: c.jalr s0
+
+beq x8, x0, -256
+#CHECK: c.beqz s0, -256
+bne x8, x0, 254
+#CHECK: c.bnez s0, 254
+
+# Integer Computational Instructions
+
+addi x1, x0, -31
+#CHECK: c.li ra, -31
+lui x3, 63
+#CHECK: c.lui gp, 63
+addi x1, x1, -32
+#CHECK: c.addi ra, -32
+addi x2, x2, -32
+#CHECK: c.addi16sp sp, -32
+
+slli x8, x8, 31
+#CHECK: c.slli s0, 31
+srli x8, x8, 31
+#CHECK: c.srli s0, 31
+srai x8, x8, 31
+#CHECK: c.srai s0, 31
+
+andi x8, x8, 31
+#CHECK: c.andi s0, 31
+
+# Integer Computational Instructions 32/64 bit only
+addi x8, x2, 1020
+#CHECK: c.addi4spn s0, sp, 1020
+
+# Integer Register-Register Operations
+add x8, x0, x15
+#CHECK: c.mv s0, a5
+add x8, x8, x15
+#CHECK: c.add s0, a5
+and x8, x8, x15
+#CHECK: c.and s0, a5
+or x8, x8, x15
+#CHECK: c.or s0, a5
+xor x8, x8, x15
+#CHECK: c.xor s0, a5
+sub x8, x8, x15
+#CHECK: c.sub s0, a5
+
+#c.nop
+#addi x0, x0, 0
+#c.ebreak
+#c.add x0, x0
+
+
Index: test/MC/RISCV/compressed64-alias.s
===================================================================
--- /dev/null
+++ test/MC/RISCV/compressed64-alias.s
@@ -0,0 +1,126 @@
+# This test checks that we are able to generate compressed
+# instructions from the expanded form defined with InstAlias<>.
+
+#RUN: llvm-mc -triple riscv64 -mattr=+c,+f,+d < %s | FileCheck %s
+
+#Load and Store Instructions
+
+lw x1, 252 (x2)
+#CHECK: c.lwsp ra, 252(sp)
+sw x0, 252 (x2)
+#CHECK: c.swsp zero, 252(sp)
+lw x8, 124 (x15)
+#CHECK: c.lw s0, 124(a5)
+sw x8, 124 (x15)
+#CHECK: c.sw s0, 124(a5)
+
+#Load and Store Instructions 32 and 64 bit only
+fld f0, 64 (x2)
+#CHECK: c.fldsp ft0, 64(sp)
+fsd f0, 64 (x2)
+#CHECK: c.fsdsp ft0, 64(sp)
+fld f8, 248(x8)
+#CHECK: c.fld fs0, 248(s0)
+fsd f8, 248(x8)
+#CHECK: c.fsd fs0, 248(s0)
+
+#Load and Store Instructions 64/128bit
+ld x8, 248 (x15)
+#CHECK: c.ld s0, 248(a5)
+ld x1, 248 (x2)
+#CHECK: c.ldsp ra, 248(sp)
+sd x8, 64(x2)
+#CHECK: c.sdsp s0, 64(sp)
+sd x8, 64 (x15)
+#CHECK: c.sd s0, 64(a5)
+
+#Load and Store Instructions 128bit
+# lq x8, 252 (x15)
+# c.lq x8, 252 (x15)
+# lq x1, 252 (x2)
+# c.ldsp x1, 252 (x2)
+# sq x1, 252 (x2)
+# c.sqsp x1, 252 (x2)
+# sq x8, 252 (x15)
+
+# Control Transfer Instructions
+
+jal x0, -2048
+#CHECK: c.j -2048
+jal x1, 2046
+#CHECK: c.jal 2046
+jalr x0, x1, 0
+#CHECK: c.jr ra
+jalr x1, x8, 0
+#CHECK: c.jalr s0
+
+beq x8, x0, -256
+#CHECK: c.beqz s0, -256
+bne x8, x0, 254
+#CHECK: c.bnez s0, 254
+
+# Integer Computational Instructions
+
+addi x1, x0, -31
+#CHECK: c.li ra, -31
+lui x3, 63
+#CHECK: c.lui gp, 63
+addi x1, x1, -32
+#CHECK: c.addi ra, -32
+addi x2, x2, -32
+#CHECK: c.addi16sp sp, -32
+
+slli x8, x8, 31
+#CHECK: c.slli s0, 31
+srli x8, x8, 31
+#CHECK: c.srli s0, 31
+srai x8, x8, 31
+#CHECK: c.srai s0, 31
+
+andi x8, x8, 31
+#CHECK: c.andi s0, 31
+
+# Integer Computational Instructions 32/64 bit only
+addi x8, x2, 1020
+#CHECK: c.addi4spn s0, sp, 1020
+
+# Integer Computational Instructions 64/128 bit only
+addiw x4, x4, 31
+#CHECK: c.addiw tp, 31
+#sext.w x4
+# c.addiw tp, 0
+
+# Integer Computational Instructions 128 bit only
+#slli x8, x8, 64
+#c.slli x8, 0
+#srli x8, x8, 64
+#c.srli x8, 0
+#srai x8, x8, 64
+#c.srai x8, 0
+
+# Integer Register-Register Operations
+add x8, x0, x15
+#CHECK: c.mv s0, a5
+add x8, x8, x15
+#CHECK: c.add s0, a5
+and x8, x8, x15
+#CHECK: c.and s0, a5
+or x8, x8, x15
+#CHECK: c.or s0, a5
+xor x8, x8, x15
+#CHECK: c.xor s0, a5
+sub x8, x8, x15
+#CHECK: c.sub s0, a5
+
+
+# Integer Register-Register Operations 64/128 only
+addw x8, x8, x15
+#CHECK:c.addw s0, a5
+subw x8, x8, x15
+#CHECK:c.subw s0, a5
+#c.nop
+#addi x0, x0, 0
+#c.ebreak
+#c.add x0, x0
+
+
Index: utils/TableGen/AsmMatcherEmitter.cpp
===================================================================
--- utils/TableGen/AsmMatcherEmitter.cpp
+++ utils/TableGen/AsmMatcherEmitter.cpp
@@ -335,6 +335,8 @@
       // Unrelated tokens and user classes are ordered by the name of their
       // root nodes, so that there is a consistent ordering between
       // unconnected trees.
+      // Comparing root names keeps this ordering total and deterministic
+      // for user classes whose trees are not related to each other.
       return findRoot()->ValueName < RHS.findRoot()->ValueName;
     }
   } else if (isRegisterClass()) {
@@ -591,9 +593,20 @@
   if (AsmOperands.size() != RHS.AsmOperands.size())
     return AsmOperands.size() < RHS.AsmOperands.size();
 
+  // Operand counts match; decide the order operand-by-operand below.
   // Compare lexicographically by operand. The matcher validates that other
   // orderings wouldn't be ambiguous using \see couldMatchAmbiguouslyWith().
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) { + llvm::errs() << " AsmOperand[" << i <<"] opName " << AsmOperands[i].SrcOpName << + " Token " << AsmOperands[i].Token << " RHS opName " << RHS.AsmOperands[i].SrcOpName << + " Token " << RHS.AsmOperands[i].Token << "\n"; + + if (AsmOperands[i].Class->isUserClass() && + RHS.AsmOperands[i].Class->isUserClass() && + !AsmOperands[i].Class->isRelatedTo(*RHS.AsmOperands[i].Class)) + if (RequiredFeatures.size() != RHS.RequiredFeatures.size()) + return RequiredFeatures.size() > RHS.RequiredFeatures.size(); + if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class) return true; if (*RHS.AsmOperands[i].Class < *AsmOperands[i].Class) @@ -1526,7 +1539,7 @@ II->initialize(*this, SingletonRegisters, Variant, HasMnemonicFirst); // Validate the alias definitions. - II->validate(CommentDelimiter, false); + II->validate(CommentDelimiter, true); Matchables.push_back(std::move(II)); }