diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
index 5898149c9fe1..8e0698966d05 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
@@ -1,410 +1,510 @@
//===-- RISCVMCCodeEmitter.cpp - Convert RISCV code to machine code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the RISCVMCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVFixupKinds.h"
#include "MCTargetDesc/RISCVMCExpr.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "Utils/RISCVBaseInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
STATISTIC(MCNumFixups, "Number of MC fixups created");

namespace {
class RISCVMCCodeEmitter : public MCCodeEmitter {
  RISCVMCCodeEmitter(const RISCVMCCodeEmitter &) = delete;
  void operator=(const RISCVMCCodeEmitter &) = delete;
  MCContext &Ctx;
  MCInstrInfo const &MCII;

public:
  RISCVMCCodeEmitter(MCContext &ctx, MCInstrInfo const &MCII)
      : Ctx(ctx), MCII(MCII) {}

  ~RISCVMCCodeEmitter() override {}

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void expandFunctionCall(const MCInst &MI, raw_ostream &OS,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;

  void expandAddTPRel(const MCInst &MI, raw_ostream &OS,
                      SmallVectorImpl<MCFixup> &Fixups,
                      const MCSubtargetInfo &STI) const;

+  void expandVMSGE(const MCInst &MI, raw_ostream &OS,
+                   SmallVectorImpl<MCFixup> &Fixups,
+                   const MCSubtargetInfo &STI) const;
+
  /// TableGen'erated function for getting the binary encoding for an
  /// instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// Return binary encoding of operand. If the machine operand requires
  /// relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  unsigned getImmOpValueAsr1(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  unsigned getImmOpValue(const MCInst &MI, unsigned OpNo,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  unsigned getVMaskReg(const MCInst &MI, unsigned OpNo,
                       SmallVectorImpl<MCFixup> &Fixups,
                       const MCSubtargetInfo &STI) const;

private:
  FeatureBitset computeAvailableFeatures(const FeatureBitset &FB) const;
  void
  verifyInstructionPredicates(const MCInst &MI,
                              const FeatureBitset &AvailableFeatures) const;
};
} // end anonymous namespace

MCCodeEmitter *llvm::createRISCVMCCodeEmitter(const MCInstrInfo &MCII,
                                              const MCRegisterInfo &MRI,
                                              MCContext &Ctx) {
  return new RISCVMCCodeEmitter(Ctx, MCII);
}

// Expand PseudoCALL(Reg), PseudoTAIL and PseudoJump to AUIPC and JALR with
// relocation types. We expand those pseudo-instructions while encoding them,
// meaning AUIPC and JALR won't go through RISCV MC to MC compressed
// instruction transformation. This is acceptable because AUIPC has no 16-bit
// form and C_JALR has no immediate operand field. We let linker relaxation
// deal with it. When linker relaxation is enabled, AUIPC and JALR have a
// chance to relax to JAL.
// If the C extension is enabled, JAL has a chance to relax to C_JAL.
void RISCVMCCodeEmitter::expandFunctionCall(const MCInst &MI, raw_ostream &OS,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  MCInst TmpInst;
  MCOperand Func;
  Register Ra;
  if (MI.getOpcode() == RISCV::PseudoTAIL) {
    Func = MI.getOperand(0);
    Ra = RISCV::X6;
  } else if (MI.getOpcode() == RISCV::PseudoCALLReg) {
    Func = MI.getOperand(1);
    Ra = MI.getOperand(0).getReg();
  } else if (MI.getOpcode() == RISCV::PseudoCALL) {
    Func = MI.getOperand(0);
    Ra = RISCV::X1;
  } else if (MI.getOpcode() == RISCV::PseudoJump) {
    Func = MI.getOperand(1);
    Ra = MI.getOperand(0).getReg();
  }
  uint32_t Binary;

  assert(Func.isExpr() && "Expected expression");

  const MCExpr *CallExpr = Func.getExpr();

  // Emit AUIPC Ra, Func with R_RISCV_CALL relocation type.
  TmpInst = MCInstBuilder(RISCV::AUIPC)
                .addReg(Ra)
                .addOperand(MCOperand::createExpr(CallExpr));
  Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(OS, Binary, support::little);

  if (MI.getOpcode() == RISCV::PseudoTAIL ||
      MI.getOpcode() == RISCV::PseudoJump)
    // Emit JALR X0, Ra, 0
    TmpInst = MCInstBuilder(RISCV::JALR).addReg(RISCV::X0).addReg(Ra).addImm(0);
  else
    // Emit JALR Ra, Ra, 0
    TmpInst = MCInstBuilder(RISCV::JALR).addReg(Ra).addReg(Ra).addImm(0);
  Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(OS, Binary, support::little);
}

// Expand PseudoAddTPRel to a simple ADD with the correct relocation.
void RISCVMCCodeEmitter::expandAddTPRel(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  MCOperand DestReg = MI.getOperand(0);
  MCOperand SrcReg = MI.getOperand(1);
  MCOperand TPReg = MI.getOperand(2);
  assert(TPReg.isReg() && TPReg.getReg() == RISCV::X4 &&
         "Expected thread pointer as second input to TP-relative add");

  MCOperand SrcSymbol = MI.getOperand(3);
  assert(SrcSymbol.isExpr() &&
         "Expected expression as third input to TP-relative add");

  const RISCVMCExpr *Expr = dyn_cast<RISCVMCExpr>(SrcSymbol.getExpr());
  assert(Expr && Expr->getKind() == RISCVMCExpr::VK_RISCV_TPREL_ADD &&
         "Expected tprel_add relocation on TP-relative symbol");

  // Emit the correct tprel_add relocation for the symbol.
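  // Illustrative annotation (not part of the original patch): for the usual
  // TLS LE sequence
  //   add a0, a0, tp, %tprel_add(sym)
  // this records an R_RISCV_TPREL_ADD relocation against sym on the ADD
  // encoded below, plus a paired R_RISCV_RELAX when FeatureRelax is enabled,
  // so the linker can later relax the whole sequence.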
  Fixups.push_back(MCFixup::create(
      0, Expr, MCFixupKind(RISCV::fixup_riscv_tprel_add), MI.getLoc()));

  // Emit fixup_riscv_relax for tprel_add where the relax feature is enabled.
  if (STI.getFeatureBits()[RISCV::FeatureRelax]) {
    const MCConstantExpr *Dummy = MCConstantExpr::create(0, Ctx);
    Fixups.push_back(MCFixup::create(
        0, Dummy, MCFixupKind(RISCV::fixup_riscv_relax), MI.getLoc()));
  }

  // Emit a normal ADD instruction with the given operands.
  MCInst TmpInst = MCInstBuilder(RISCV::ADD)
                       .addOperand(DestReg)
                       .addOperand(SrcReg)
                       .addOperand(TPReg);
  uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(OS, Binary, support::little);
}

+void RISCVMCCodeEmitter::expandVMSGE(const MCInst &MI, raw_ostream &OS,
+                                     SmallVectorImpl<MCFixup> &Fixups,
+                                     const MCSubtargetInfo &STI) const {
+  MCInst TmpInst;
+  uint32_t Binary;
+  unsigned Opcode;
+  switch (MI.getOpcode()) {
+  default:
+    llvm_unreachable("Unexpected opcode. It should be vmsgeu.vx or vmsge.vx.");
+  case RISCV::PseudoVMSGEU_VX:
+  case RISCV::PseudoVMSGEU_VX_M:
+  case RISCV::PseudoVMSGEU_VX_M_T:
+    Opcode = RISCV::VMSLTU_VX;
+    break;
+  case RISCV::PseudoVMSGE_VX:
+  case RISCV::PseudoVMSGE_VX_M:
+  case RISCV::PseudoVMSGE_VX_M_T:
+    Opcode = RISCV::VMSLT_VX;
+    break;
+  }
+  if (MI.getNumOperands() == 3) {
+    // unmasked va >= x
+    //
+    // pseudoinstruction: vmsge{u}.vx vd, va, x
+    // expansion: vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
+    TmpInst = MCInstBuilder(Opcode)
+                  .addOperand(MI.getOperand(0))
+                  .addOperand(MI.getOperand(1))
+                  .addOperand(MI.getOperand(2))
+                  .addReg(RISCV::NoRegister);
+    Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
+    support::endian::write(OS, Binary, support::little);
+
+    TmpInst = MCInstBuilder(RISCV::VMNAND_MM)
+                  .addOperand(MI.getOperand(0))
+                  .addOperand(MI.getOperand(0))
+                  .addOperand(MI.getOperand(0));
+    Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
+    support::endian::write(OS, Binary, support::little);
+  } else if (MI.getNumOperands() == 4) {
+    // masked va >= x, vd != v0
+    //
+    // pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t
+    // expansion: vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
+    assert(MI.getOperand(0).getReg() != RISCV::V0 &&
+           "The destination register should not be V0.");
+    TmpInst = MCInstBuilder(Opcode)
+                  .addOperand(MI.getOperand(0))
+                  .addOperand(MI.getOperand(1))
+                  .addOperand(MI.getOperand(2))
+                  .addOperand(MI.getOperand(3));
+    Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
+    support::endian::write(OS, Binary, support::little);
+
+    TmpInst = MCInstBuilder(RISCV::VMXOR_MM)
+                  .addOperand(MI.getOperand(0))
+                  .addOperand(MI.getOperand(0))
+                  .addReg(RISCV::V0);
+    Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
+    support::endian::write(OS, Binary, support::little);
+  } else if (MI.getNumOperands() == 5) {
+    // masked va >= x, vd == v0
+    //
+    // pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
+    // expansion: vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
+    assert(MI.getOperand(0).getReg() == RISCV::V0 &&
+           "The destination register should be V0.");
+    assert(MI.getOperand(1).getReg() != RISCV::V0 &&
+           "The temporary vector register should not be V0.");
+    TmpInst = MCInstBuilder(Opcode)
+                  .addOperand(MI.getOperand(1))
+                  .addOperand(MI.getOperand(2))
+                  .addOperand(MI.getOperand(3))
+                  .addOperand(MI.getOperand(4));
+    Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
+    support::endian::write(OS, Binary, support::little);
+
+    TmpInst = MCInstBuilder(RISCV::VMANDNOT_MM)
+                  .addOperand(MI.getOperand(0))
+                  .addOperand(MI.getOperand(0))
+                  .addOperand(MI.getOperand(1));
+    Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
+    support::endian::write(OS, Binary, support::little);
+  }
+}
+
void RISCVMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  // Get byte count of instruction.
  unsigned Size = Desc.getSize();

  // RISCVInstrInfo::getInstSizeInBytes hard-codes the number of expanded
  // instructions for each pseudo, and must be updated when adding new pseudos
  // or changing existing ones.
  if (MI.getOpcode() == RISCV::PseudoCALLReg ||
      MI.getOpcode() == RISCV::PseudoCALL ||
      MI.getOpcode() == RISCV::PseudoTAIL ||
      MI.getOpcode() == RISCV::PseudoJump) {
    expandFunctionCall(MI, OS, Fixups, STI);
    MCNumEmitted += 2;
    return;
  }

  if (MI.getOpcode() == RISCV::PseudoAddTPRel) {
    expandAddTPRel(MI, OS, Fixups, STI);
    MCNumEmitted += 1;
    return;
  }

+  if (MI.getOpcode() == RISCV::PseudoVMSGEU_VX ||
+      MI.getOpcode() == RISCV::PseudoVMSGE_VX ||
+      MI.getOpcode() == RISCV::PseudoVMSGEU_VX_M ||
+      MI.getOpcode() == RISCV::PseudoVMSGE_VX_M ||
+      MI.getOpcode() == RISCV::PseudoVMSGEU_VX_M_T ||
+      MI.getOpcode() == RISCV::PseudoVMSGE_VX_M_T) {
+    expandVMSGE(MI, OS, Fixups, STI);
+    return;
+  }
+
  switch (Size) {
  default:
    llvm_unreachable("Unhandled encodeInstruction length!");
  case 2: {
    uint16_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
    support::endian::write(OS, Bits, support::little);
    break;
  }
  case 4: {
    uint32_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
    support::endian::write(OS, Bits, support::little);
    break;
  }
  }

  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

unsigned
RISCVMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {

  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  llvm_unreachable("Unhandled expression!");
  return 0;
}

unsigned
RISCVMCCodeEmitter::getImmOpValueAsr1(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isImm()) {
    unsigned Res = MO.getImm();
    assert((Res & 1) == 0 && "LSB is non-zero");
    return Res >> 1;
  }

  return getImmOpValue(MI, OpNo, Fixups, STI);
}

unsigned RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  bool EnableRelax = STI.getFeatureBits()[RISCV::FeatureRelax];
  const MCOperand &MO = MI.getOperand(OpNo);

  MCInstrDesc const &Desc = MCII.get(MI.getOpcode());
  unsigned MIFrm = Desc.TSFlags & RISCVII::InstFormatMask;

  // If the destination is an immediate, there is nothing to do.
  if (MO.isImm())
    return MO.getImm();

  assert(MO.isExpr() &&
         "getImmOpValue expects only expressions or immediates");
  const MCExpr *Expr = MO.getExpr();
  MCExpr::ExprKind Kind = Expr->getKind();
  RISCV::Fixups FixupKind = RISCV::fixup_riscv_invalid;
  bool RelaxCandidate = false;
  if (Kind == MCExpr::Target) {
    const RISCVMCExpr *RVExpr = cast<RISCVMCExpr>(Expr);

    switch (RVExpr->getKind()) {
    case RISCVMCExpr::VK_RISCV_None:
    case RISCVMCExpr::VK_RISCV_Invalid:
    case RISCVMCExpr::VK_RISCV_32_PCREL:
      llvm_unreachable("Unhandled fixup kind!");
    case RISCVMCExpr::VK_RISCV_TPREL_ADD:
      // tprel_add is only used to indicate that a relocation should be emitted
      // for an add instruction used in TP-relative addressing.
      // It should not be expanded as if representing an actual instruction
      // operand and so to encounter it here is an error.
      llvm_unreachable(
          "VK_RISCV_TPREL_ADD should not represent an instruction operand");
    case RISCVMCExpr::VK_RISCV_LO:
      if (MIFrm == RISCVII::InstFormatI)
        FixupKind = RISCV::fixup_riscv_lo12_i;
      else if (MIFrm == RISCVII::InstFormatS)
        FixupKind = RISCV::fixup_riscv_lo12_s;
      else
        llvm_unreachable("VK_RISCV_LO used with unexpected instruction format");
      RelaxCandidate = true;
      break;
    case RISCVMCExpr::VK_RISCV_HI:
      FixupKind = RISCV::fixup_riscv_hi20;
      RelaxCandidate = true;
      break;
    case RISCVMCExpr::VK_RISCV_PCREL_LO:
      if (MIFrm == RISCVII::InstFormatI)
        FixupKind = RISCV::fixup_riscv_pcrel_lo12_i;
      else if (MIFrm == RISCVII::InstFormatS)
        FixupKind = RISCV::fixup_riscv_pcrel_lo12_s;
      else
        llvm_unreachable(
            "VK_RISCV_PCREL_LO used with unexpected instruction format");
      RelaxCandidate = true;
      break;
    case RISCVMCExpr::VK_RISCV_PCREL_HI:
      FixupKind = RISCV::fixup_riscv_pcrel_hi20;
      RelaxCandidate = true;
      break;
    case RISCVMCExpr::VK_RISCV_GOT_HI:
      FixupKind = RISCV::fixup_riscv_got_hi20;
      break;
    case RISCVMCExpr::VK_RISCV_TPREL_LO:
      if (MIFrm == RISCVII::InstFormatI)
        FixupKind = RISCV::fixup_riscv_tprel_lo12_i;
      else if (MIFrm == RISCVII::InstFormatS)
        FixupKind = RISCV::fixup_riscv_tprel_lo12_s;
      else
        llvm_unreachable(
            "VK_RISCV_TPREL_LO used with unexpected instruction format");
      RelaxCandidate = true;
      break;
    case RISCVMCExpr::VK_RISCV_TPREL_HI:
      FixupKind = RISCV::fixup_riscv_tprel_hi20;
      RelaxCandidate = true;
      break;
    case RISCVMCExpr::VK_RISCV_TLS_GOT_HI:
      FixupKind = RISCV::fixup_riscv_tls_got_hi20;
      break;
    case RISCVMCExpr::VK_RISCV_TLS_GD_HI:
      FixupKind = RISCV::fixup_riscv_tls_gd_hi20;
      break;
    case RISCVMCExpr::VK_RISCV_CALL:
      FixupKind = RISCV::fixup_riscv_call;
      RelaxCandidate = true;
      break;
    case RISCVMCExpr::VK_RISCV_CALL_PLT:
      FixupKind = RISCV::fixup_riscv_call_plt;
      RelaxCandidate = true;
      break;
    }
  } else if (Kind == MCExpr::SymbolRef &&
             cast<MCSymbolRefExpr>(Expr)->getKind() ==
                 MCSymbolRefExpr::VK_None) {
    if (Desc.getOpcode() == RISCV::JAL) {
      FixupKind = RISCV::fixup_riscv_jal;
    } else if (MIFrm == RISCVII::InstFormatB) {
      FixupKind = RISCV::fixup_riscv_branch;
    } else if (MIFrm == RISCVII::InstFormatCJ) {
      FixupKind = RISCV::fixup_riscv_rvc_jump;
    } else if (MIFrm == RISCVII::InstFormatCB) {
      FixupKind = RISCV::fixup_riscv_rvc_branch;
    }
  }

  assert(FixupKind != RISCV::fixup_riscv_invalid && "Unhandled expression!");

  Fixups.push_back(
      MCFixup::create(0, Expr, MCFixupKind(FixupKind), MI.getLoc()));
  ++MCNumFixups;

  // Ensure an R_RISCV_RELAX relocation will be emitted if linker relaxation is
  // enabled and the current fixup will result in a relocation that may be
  // relaxed.
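  // Illustrative example (annotation, not from the original patch):
  // `lui a0, %hi(sym)` records a fixup_riscv_hi20 fixup above; with
  // FeatureRelax enabled, the branch below adds a paired fixup_riscv_relax at
  // the same offset, so the object file carries R_RISCV_HI20 + R_RISCV_RELAX
  // against that instruction.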
  if (EnableRelax && RelaxCandidate) {
    const MCConstantExpr *Dummy = MCConstantExpr::create(0, Ctx);
    Fixups.push_back(
        MCFixup::create(0, Dummy, MCFixupKind(RISCV::fixup_riscv_relax),
                        MI.getLoc()));
    ++MCNumFixups;
  }

  return 0;
}

unsigned RISCVMCCodeEmitter::getVMaskReg(const MCInst &MI, unsigned OpNo,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  MCOperand MO = MI.getOperand(OpNo);
  assert(MO.isReg() && "Expected a register.");

  switch (MO.getReg()) {
  default:
    llvm_unreachable("Invalid mask register.");
  case RISCV::V0:
    return 0;
  case RISCV::NoRegister:
    return 1;
  }
}

#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "RISCVGenMCCodeEmitter.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 3ac474cb6549..f0c9fcae9711 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -1,1101 +1,1141 @@
//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 0.9.
/// This version is still experimental as the 'V' extension hasn't been
/// ratified yet.
///
//===----------------------------------------------------------------------===//

include "RISCVInstrFormatsV.td"

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

def VTypeIAsmOperand : AsmOperandClass {
  let Name = "VTypeI";
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
}

def VTypeIOp : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<11>";
}

def VRegAsmOperand : AsmOperandClass {
  let Name = "RVVRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isReg";
  let ParserMethod = "parseRegister";
}

def VRegOp : RegisterOperand<VR> {
  let ParserMatchClass = VRegAsmOperand;
  let PrintMethod = "printOperand";
}

def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}

def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addSImm5Plus1Operands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

def simm5_plus1 : Operand<XLenVT>,
                  ImmLeaf<XLenVT, [{return isInt<5>(Imm - 1);}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let PrintMethod = "printSImm5Plus1";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm - 1);
    return
MCOp.isBareSymbolRef(); }]; } //===----------------------------------------------------------------------===// // Instruction class templates //===----------------------------------------------------------------------===// let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in { // load vd, (rs1), vm class VUnitStrideLoad : RVInstVLU<0b000, width.Value{3}, lumop, width.Value{2-0}, (outs VRegOp:$vd), (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">; // load vd, (rs1), rs2, vm class VStridedLoad : RVInstVLS<0b000, width.Value{3}, width.Value{2-0}, (outs VRegOp:$vd), (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr, "$vd, (${rs1}), $rs2$vm">; // load vd, (rs1), vs2, vm class VIndexedLoad : RVInstVLX<0b000, width.Value{3}, width.Value{2-0}, (outs VRegOp:$vd), (ins GPR:$rs1, VRegOp:$vs2, VMaskOp:$vm), opcodestr, "$vd, (${rs1}), $vs2$vm">; // vlr.v vd, (rs1) class VWholeLoad nf, string opcodestr> : RVInstVLU { let vm = 1; let Uses = []; } // segment load vd, (rs1), vm class VUnitStrideSegmentLoad nf, RISCVLSUMOP lumop, RISCVWidth width, string opcodestr> : RVInstVLU; // segment load vd, (rs1), rs2, vm class VStridedSegmentLoad nf, RISCVWidth width, string opcodestr> : RVInstVLS; // segment load vd, (rs1), vs2, vm class VIndexedSegmentLoad nf, RISCVWidth width, string opcodestr> : RVInstVLX; } // hasSideEffects = 0, mayLoad = 1, mayStore = 0 let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in { // store vd, vs3, (rs1), vm class VUnitStrideStore : RVInstVSU<0b000, width.Value{3}, sumop, width.Value{2-0}, (outs), (ins VRegOp:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr, "$vs3, (${rs1})$vm">; // store vd, vs3, (rs1), rs2, vm class VStridedStore : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs), (ins VRegOp:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr, "$vs3, (${rs1}), $rs2$vm">; // store vd, vs3, (rs1), vs2, vm class VIndexedStore : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs), (ins VRegOp:$vs3, GPR:$rs1, VRegOp:$vs2, VMaskOp:$vm), opcodestr, "$vs3, (${rs1}), $vs2$vm">; // vsr.v vd, (rs1) class VWholeStore nf, string opcodestr> : RVInstVSU { let vm = 1; let Uses = []; } // segment store vd, vs3, (rs1), vm class VUnitStrideSegmentStore nf, RISCVWidth width, string opcodestr> : RVInstVSU; // segment store vd, vs3, (rs1), rs2, vm class VStridedSegmentStore nf, RISCVWidth width, string opcodestr> : RVInstVSS; // segment store vd, vs3, (rs1), vs2, vm class VIndexedSegmentStore nf, RISCVWidth width, string opcodestr> : RVInstVSX; } // hasSideEffects = 0, mayLoad = 0, mayStore = 1 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { // op vd, vs2, vs1, vm class VALUVV funct6, RISCVVFormat opv, string opcodestr> : RVInstVV; // op vd, vs2, vs1, v0 (without mask, use v0 as carry input) class VALUmVV funct6, RISCVVFormat opv, string opcodestr> : RVInstVV { let vm = 0; } // op vd, vs1, vs2, vm (reverse the order of vs1 and vs2) class VALUrVV funct6, RISCVVFormat opv, string opcodestr> : RVInstVV; // op vd, vs1, vs2 class VALUVVNoVm funct6, RISCVVFormat opv, string opcodestr> : RVInstVV { let vm = 1; } // op vd, vs2, rs1, vm class VALUVX funct6, RISCVVFormat opv, string opcodestr> : RVInstVX; // op vd, vs2, rs1, v0 (without mask, use v0 as carry input) class VALUmVX funct6, RISCVVFormat opv, string opcodestr> : RVInstVX { let vm = 0; } // op vd, rs1, vs2, vm (reverse the order of rs1 and vs2) class VALUrVX funct6, RISCVVFormat opv, string opcodestr> : RVInstVX; // op vd, vs1, vs2 class VALUVXNoVm funct6, RISCVVFormat opv, string opcodestr> : RVInstVX { let vm = 1; } // op 
vd, vs2, imm, vm class VALUVI funct6, string opcodestr, Operand optype = simm5> : RVInstIVI; // op vd, vs2, imm, v0 (without mask, use v0 as carry input) class VALUmVI funct6, string opcodestr, Operand optype = simm5> : RVInstIVI { let vm = 0; } // op vd, vs2, imm, vm class VALUVINoVm funct6, string opcodestr, Operand optype = simm5> : RVInstIVI { let vm = 1; } // op vd, vs2, rs1, vm (Float) class VALUVF funct6, RISCVVFormat opv, string opcodestr> : RVInstVX; // op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2) class VALUrVF funct6, RISCVVFormat opv, string opcodestr> : RVInstVX; // op vd, vs2, vm (use vs1 as instruction encoding) class VALUVs2 funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr> : RVInstV; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in { // vamo vd, (rs1), vs2, vd, vm class VAMOWd : RVInstVAMO { let Constraints = "$vd_wd = $vd"; let wd = 1; bits<5> vd; let Inst{11-7} = vd; } // vamo x0, (rs1), vs2, vs3, vm class VAMONoWd : RVInstVAMO { bits<5> vs3; let Inst{11-7} = vs3; } } // hasSideEffects = 0, mayLoad = 1, mayStore = 1 //===----------------------------------------------------------------------===// // Combination of instruction classes. // Use these multiclasses to define instructions more easily. //===----------------------------------------------------------------------===// multiclass VALU_IV_V_X_I funct6, Operand optype = simm5, string vw = "v"> { def V : VALUVV; def X : VALUVX; def I : VALUVI; } multiclass VALU_IV_V_X funct6, string vw = "v"> { def V : VALUVV; def X : VALUVX; } multiclass VALUr_IV_V_X funct6, string vw = "v"> { def V : VALUrVV; def X : VALUrVX; } multiclass VALU_IV_X_I funct6, Operand optype = simm5, string vw = "v"> { def X : VALUVX; def I : VALUVI; } multiclass VALU_IV_V funct6> { def _VS : VALUVV; } multiclass VALUr_IV_X funct6, string vw = "v"> { def X : VALUrVX; } multiclass VALU_MV_V_X funct6, string vw = "v"> { def V : VALUVV; def X : VALUVX; } multiclass VALU_MV_V funct6> { def _VS : VALUVV; } multiclass VALU_MV_Mask funct6, string vm = "v"> { def M : VALUVVNoVm; } multiclass VALU_MV_X funct6, string vw = "v"> { def X : VALUVX; } multiclass VALUr_MV_V_X funct6, string vw = "v"> { def V : VALUrVV; def X : VALUrVX; } multiclass VALUr_MV_X funct6, string vw = "v"> { def X : VALUrVX; } multiclass VALU_MV_VS2 funct6, bits<5> vs1> { def "" : VALUVs2; } multiclass VALUm_IV_V_X_I funct6> { def VM : VALUmVV; def XM : VALUmVX; def IM : VALUmVI; } multiclass VALUm_IV_V_X funct6> { def VM : VALUmVV; def XM : VALUmVX; } multiclass VALUNoVm_IV_V_X_I funct6, Operand optype = simm5> { def V : VALUVVNoVm; def X : VALUVXNoVm; def I : VALUVINoVm; } multiclass VALUNoVm_IV_V_X funct6> { def V : VALUVVNoVm; def X : VALUVXNoVm; } multiclass VALU_FV_V_F funct6, string vw = "v"> { def V : VALUVV; def F : VALUVF; } multiclass VALU_FV_F funct6, string vw = "v"> { def F : VALUVF; } multiclass VALUr_FV_V_F funct6, string vw = "v"> { def V : VALUrVV; def F : VALUrVF; } multiclass VALU_FV_V funct6> { def _VS : VALUVV; } multiclass VALU_FV_VS2 funct6, bits<5> vs1> { def "" : VALUVs2; } multiclass VAMO { def _WD : VAMOWd; def _UNWD : VAMONoWd; } //===----------------------------------------------------------------------===// // Instructions //===----------------------------------------------------------------------===// let Predicates = [HasStdExtV] in { let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in { def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), 
"vsetvli", "$rd, $rs1, $vtypei">; def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), "vsetvl", "$rd, $rs1, $rs2">; } // hasSideEffects = 1, mayLoad = 0, mayStore = 0 // Vector Unit-Stride Instructions def VLE8_V : VUnitStrideLoad; def VLE16_V : VUnitStrideLoad; def VLE32_V : VUnitStrideLoad; def VLE64_V : VUnitStrideLoad; def VLE128_V : VUnitStrideLoad; def VLE256_V : VUnitStrideLoad; def VLE512_V : VUnitStrideLoad; def VLE1024_V : VUnitStrideLoad; def VLE8FF_V : VUnitStrideLoad; def VLE16FF_V : VUnitStrideLoad; def VLE32FF_V : VUnitStrideLoad; def VLE64FF_V : VUnitStrideLoad; def VLE128FF_V : VUnitStrideLoad; def VLE256FF_V : VUnitStrideLoad; def VLE512FF_V : VUnitStrideLoad; def VLE1024FF_V : VUnitStrideLoad; def VSE8_V : VUnitStrideStore; def VSE16_V : VUnitStrideStore; def VSE32_V : VUnitStrideStore; def VSE64_V : VUnitStrideStore; def VSE128_V : VUnitStrideStore; def VSE256_V : VUnitStrideStore; def VSE512_V : VUnitStrideStore; def VSE1024_V : VUnitStrideStore; // Vector Strided Instructions def VLSE8_V : VStridedLoad; def VLSE16_V : VStridedLoad; def VLSE32_V : VStridedLoad; def VLSE64_V : VStridedLoad; def VLSE128_V : VStridedLoad; def VLSE256_V : VStridedLoad; def VLSE512_V : VStridedLoad; def VLSE1024_V : VStridedLoad; def VSSE8_V : VStridedStore; def VSSE16_V : VStridedStore; def VSSE32_V : VStridedStore; def VSSE64_V : VStridedStore; def VSSE128_V : VStridedStore; def VSSE256_V : VStridedStore; def VSSE512_V : VStridedStore; def VSSE1024_V : VStridedStore; // Vector Indexed Instructions def VLXEI8_V : VIndexedLoad; def VLXEI16_V : VIndexedLoad; def VLXEI32_V : VIndexedLoad; def VLXEI64_V : VIndexedLoad; def VLXEI128_V : VIndexedLoad; def VLXEI256_V : VIndexedLoad; def VLXEI512_V : VIndexedLoad; def VLXEI1024_V : VIndexedLoad; def VSXEI8_V : VIndexedStore; def VSXEI16_V : VIndexedStore; def VSXEI32_V : VIndexedStore; def VSXEI64_V : VIndexedStore; def VSXEI128_V : VIndexedStore; def VSXEI256_V : VIndexedStore; def VSXEI512_V : VIndexedStore; def VSXEI1024_V : VIndexedStore; def VSUXEI8_V : VIndexedStore; def VSUXEI16_V : VIndexedStore; def VSUXEI32_V : VIndexedStore; def VSUXEI64_V : VIndexedStore; def VSUXEI128_V : VIndexedStore; def VSUXEI256_V : VIndexedStore; def VSUXEI512_V : VIndexedStore; def VSUXEI1024_V : VIndexedStore; def VL1R_V : VWholeLoad<0, "vl1r.v">; def VS1R_V : VWholeStore<0, "vs1r.v">; // Vector Single-Width Integer Add and Subtract defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>; defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>; defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>; // Vector Widening Integer Add/Subtract // Refer to 11.2 Widening Vector Arithmetic Instructions // The destination vector register group cannot overlap a source vector // register group of a different element width (including the mask register // if masked), otherwise an illegal instruction exception is raised. let Constraints = "@earlyclobber $vd" in { let RVVConstraint = WidenV in { defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>; defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>; defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>; defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>; } // RVVConstraint = WidenV // Set earlyclobber for following instructions for second and mask operands. // This has the downside that the earlyclobber constraint is too coarse and // will impose unnecessary restrictions by not allowing the destination to // overlap with the first (wide) operand. 
let RVVConstraint = WidenW in { defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">; defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">; defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">; defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">; } // RVVConstraint = WidenW } // Constraints = "@earlyclobber $vd" def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm", (VWADD_VX VRegOp:$vd, VRegOp:$vs, X0, VMaskOp:$vm)>; def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm", (VWADDU_VX VRegOp:$vd, VRegOp:$vs, X0, VMaskOp:$vm)>; // Vector Integer Extension defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>; defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>; defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>; defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>; defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>; defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>; // Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>; let Constraints = "@earlyclobber $vd", RVVConstraint = Vmadc in { defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>; defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Vmadc defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>; let Constraints = "@earlyclobber $vd", RVVConstraint = Vmadc in { defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>; defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Vmadc // Vector Bitwise Logical Instructions defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>; defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>; defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>; def : InstAlias<"vnot.v $vd, $vs$vm", (VXOR_VI VRegOp:$vd, VRegOp:$vs, -1, VMaskOp:$vm)>; // Vector Single-Width Bit Shift Instructions defm VSLL_V : VALU_IV_V_X_I<"vsll", 0b100101, uimm5>; defm VSRL_V : VALU_IV_V_X_I<"vsrl", 0b101000, uimm5>; defm VSRA_V : VALU_IV_V_X_I<"vsra", 0b101001, uimm5>; // Vector Narrowing Integer Right Shift Instructions // Refer to 11.3. Narrowing Vector Arithmetic Instructions // The destination vector register group cannot overlap the first source // vector register group (specified by vs2). The destination vector register // group cannot overlap the mask register if used, unless LMUL=1. 
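// e.g. (illustrative): `vnsrl.wi v4, v4, 1` is rejected because vd overlaps
// the wide vs2 group, while `vnsrl.wi v0, v2, 1, v0.t` (vd overlapping the
// mask register) is only accepted when LMUL=1.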
let Constraints = "@earlyclobber $vd", RVVConstraint = Narrow in { defm VNSRL_W : VALU_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">; defm VNSRA_W : VALU_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">; } // Constraints = "@earlyclobber $vd", RVVConstraint = Narrow // Vector Integer Comparison Instructions let RVVConstraint = NoConstraint in { defm VMSEQ_V : VALU_IV_V_X_I<"vmseq", 0b011000>; defm VMSNE_V : VALU_IV_V_X_I<"vmsne", 0b011001>; defm VMSLTU_V : VALU_IV_V_X<"vmsltu", 0b011010>; defm VMSLT_V : VALU_IV_V_X<"vmslt", 0b011011>; defm VMSLEU_V : VALU_IV_V_X_I<"vmsleu", 0b011100>; defm VMSLE_V : VALU_IV_V_X_I<"vmsle", 0b011101>; defm VMSGTU_V : VALU_IV_X_I<"vmsgtu", 0b011110>; defm VMSGT_V : VALU_IV_X_I<"vmsgt", 0b011111>; } // RVVConstraint = NoConstraint def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm", (VMSLTU_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm", (VMSLT_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm", (VMSLEU_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmsge.vv $vd, $va, $vb$vm", (VMSLE_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmsltu.vi $vd, $va, $imm$vm", (VMSLEU_VI VRegOp:$vd, VRegOp:$va, simm5_plus1:$imm, VMaskOp:$vm), 0>; def : InstAlias<"vmslt.vi $vd, $va, $imm$vm", (VMSLE_VI VRegOp:$vd, VRegOp:$va, simm5_plus1:$imm, VMaskOp:$vm), 0>; def : InstAlias<"vmsgeu.vi $vd, $va, $imm$vm", (VMSGTU_VI VRegOp:$vd, VRegOp:$va, simm5_plus1:$imm, VMaskOp:$vm), 0>; def : InstAlias<"vmsge.vi $vd, $va, $imm$vm", (VMSGT_VI VRegOp:$vd, VRegOp:$va, simm5_plus1:$imm, VMaskOp:$vm), 0>; +let isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { +def PseudoVMSGEU_VX : Pseudo<(outs VRegOp:$vd), + (ins VRegOp:$vs2, GPR:$rs1), + [], "vmsgeu.vx", "$vd, $vs2, $rs1">; +def PseudoVMSGE_VX : Pseudo<(outs VRegOp:$vd), + (ins VRegOp:$vs2, GPR:$rs1), + [], "vmsge.vx", "$vd, $vs2, $rs1">; +def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd), + (ins VRegOp:$vs2, GPR:$rs1, VMaskOp:$vm), + [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">; +def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd), + (ins VRegOp:$vs2, GPR:$rs1, VMaskOp:$vm), + [], "vmsge.vx", "$vd, $vs2, $rs1$vm">; +def PseudoVMSGEU_VX_M_T : Pseudo<(outs VMV0:$vd, VRegOp:$scratch), + (ins VRegOp:$vs2, GPR:$rs1, VMaskOp:$vm), + [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">; +def PseudoVMSGE_VX_M_T : Pseudo<(outs VMV0:$vd, VRegOp:$scratch), + (ins VRegOp:$vs2, GPR:$rs1, VMaskOp:$vm), + [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">; +} + +// This apparently unnecessary alias prevents matching `vmsge{u}.vx vd, vs2, vs1` as if +// it were an unmasked (i.e. $vm = RISCV::NoRegister) PseudoVMSGE{U}_VX_M. 
+def : InstAlias<"vmsgeu.vx $vd, $va, $rs1", + (PseudoVMSGEU_VX VRegOp:$vd, VRegOp:$va, GPR:$rs1), 0>; +def : InstAlias<"vmsge.vx $vd, $va, $rs1", + (PseudoVMSGE_VX VRegOp:$vd, VRegOp:$va, GPR:$rs1), 0>; +def : InstAlias<"vmsgeu.vx v0, $va, $rs1, $vm, $vt", + (PseudoVMSGEU_VX_M_T V0, VRegOp:$vt, VRegOp:$va, GPR:$rs1, + VMaskOp:$vm), 0>; +def : InstAlias<"vmsge.vx v0, $va, $rs1, $vm, $vt", + (PseudoVMSGE_VX_M_T V0, VRegOp:$vt, VRegOp:$va, GPR:$rs1, + VMaskOp:$vm), 0>; +def : InstAlias<"vmsgeu.vx $vd, $va, $rs1, $vm", + (PseudoVMSGEU_VX_M VRNoV0:$vd, VRegOp:$va, GPR:$rs1, + VMaskOp:$vm), 0>; +def : InstAlias<"vmsge.vx $vd, $va, $rs1, $vm", + (PseudoVMSGE_VX_M VRNoV0:$vd, VRegOp:$va, GPR:$rs1, + VMaskOp:$vm), 0>; + // Vector Integer Min/Max Instructions defm VMINU_V : VALU_IV_V_X<"vminu", 0b000100>; defm VMIN_V : VALU_IV_V_X<"vmin", 0b000101>; defm VMAXU_V : VALU_IV_V_X<"vmaxu", 0b000110>; defm VMAX_V : VALU_IV_V_X<"vmax", 0b000111>; // Vector Single-Width Integer Multiply Instructions defm VMUL_V : VALU_MV_V_X<"vmul", 0b100101>; defm VMULH_V : VALU_MV_V_X<"vmulh", 0b100111>; defm VMULHU_V : VALU_MV_V_X<"vmulhu", 0b100100>; defm VMULHSU_V : VALU_MV_V_X<"vmulhsu", 0b100110>; // Vector Integer Divide Instructions defm VDIVU_V : VALU_MV_V_X<"vdivu", 0b100000>; defm VDIV_V : VALU_MV_V_X<"vdiv", 0b100001>; defm VREMU_V : VALU_MV_V_X<"vremu", 0b100010>; defm VREM_V : VALU_MV_V_X<"vrem", 0b100011>; // Vector Widening Integer Multiply Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { defm VWMUL_V : VALU_MV_V_X<"vwmul", 0b111011>; defm VWMULU_V : VALU_MV_V_X<"vwmulu", 0b111000>; defm VWMULSU_V : VALU_MV_V_X<"vwmulsu", 0b111010>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV // Vector Single-Width Integer Multiply-Add Instructions defm VMACC_V : VALUr_MV_V_X<"vmacc", 0b101101>; defm VNMSAC_V : VALUr_MV_V_X<"vnmsac", 0b101111>; defm VMADD_V : VALUr_MV_V_X<"vmadd", 0b101001>; defm VNMSUB_V : VALUr_MV_V_X<"vnmsub", 0b101011>; // Vector Widening Integer Multiply-Add Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { defm VWMACCU_V : VALUr_MV_V_X<"vwmaccu", 0b111100>; defm VWMACC_V : VALUr_MV_V_X<"vwmacc", 0b111101>; defm VWMACCSU_V : VALUr_MV_V_X<"vwmaccsu", 0b111111>; defm VWMACCUS_V : VALUr_MV_X<"vwmaccus", 0b111110>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV // Vector Integer Merge Instructions defm VMERGE_V : VALUm_IV_V_X_I<"vmerge", 0b010111>; // Vector Integer Move Instructions let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1 in { // op vd, vs1 def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VRegOp:$vd), (ins VRegOp:$vs1), "vmv.v.v", "$vd, $vs1">; // op vd, rs1 def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VRegOp:$vd), (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">; // op vd, imm def VMV_V_I : RVInstIVI<0b010111, (outs VRegOp:$vd), (ins simm5:$imm), "vmv.v.i", "$vd, $imm">; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 // Vector Fixed-Point Arithmetic Instructions defm VSADDU_V : VALU_IV_V_X_I<"vsaddu", 0b100000>; defm VSADD_V : VALU_IV_V_X_I<"vsadd", 0b100001>; defm VSSUBU_V : VALU_IV_V_X<"vssubu", 0b100010>; defm VSSUB_V : VALU_IV_V_X<"vssub", 0b100011>; // Vector Single-Width Averaging Add and Subtract defm VAADDU_V : VALU_MV_V_X<"vaaddu", 0b001000>; defm VAADD_V : VALU_MV_V_X<"vaadd", 0b001001>; defm VASUBU_V : VALU_MV_V_X<"vasubu", 0b001010>; defm VASUB_V : VALU_MV_V_X<"vasub", 0b001011>; // Vector Single-Width Fractional Multiply with Rounding and Saturation defm VSMUL_V : 
VALU_IV_V_X<"vsmul", 0b100111>; // Vector Single-Width Scaling Shift Instructions defm VSSRL_V : VALU_IV_V_X_I<"vssrl", 0b101010, uimm5>; defm VSSRA_V : VALU_IV_V_X_I<"vssra", 0b101011, uimm5>; // Vector Narrowing Fixed-Point Clip Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = Narrow in { defm VNCLIPU_W : VALU_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">; defm VNCLIP_W : VALU_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">; } // Constraints = "@earlyclobber $vd", RVVConstraint = Narrow // Vector Single-Width Floating-Point Add/Subtract Instructions defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>; defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>; defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>; // Vector Widening Floating-Point Add/Subtract Instructions let Constraints = "@earlyclobber $vd" in { let RVVConstraint = WidenV in { defm VFWADD_V : VALU_FV_V_F<"vfwadd", 0b110000>; defm VFWSUB_V : VALU_FV_V_F<"vfwsub", 0b110010>; } // RVVConstraint = WidenV // Set earlyclobber for following instructions for second and mask operands. // This has the downside that the earlyclobber constraint is too coarse and // will impose unnecessary restrictions by not allowing the destination to // overlap with the first (wide) operand. let RVVConstraint = WidenW in { defm VFWADD_W : VALU_FV_V_F<"vfwadd", 0b110100, "w">; defm VFWSUB_W : VALU_FV_V_F<"vfwsub", 0b110110, "w">; } // RVVConstraint = WidenW } // Constraints = "@earlyclobber $vd" // Vector Single-Width Floating-Point Multiply/Divide Instructions defm VFMUL_V : VALU_FV_V_F<"vfmul", 0b100100>; defm VFDIV_V : VALU_FV_V_F<"vfdiv", 0b100000>; defm VFRDIV_V : VALU_FV_F<"vfrdiv", 0b100001>; // Vector Widening Floating-Point Multiply let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { defm VFWMUL_V : VALU_FV_V_F<"vfwmul", 0b111000>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV // Vector Single-Width Floating-Point Fused Multiply-Add Instructions defm VFMACC_V : VALUr_FV_V_F<"vfmacc", 0b101100>; defm VFNMACC_V : VALUr_FV_V_F<"vfnmacc", 0b101101>; defm VFMSAC_V : VALUr_FV_V_F<"vfmsac", 0b101110>; defm VFNMSAC_V : VALUr_FV_V_F<"vfnmsac", 0b101111>; defm VFMADD_V : VALUr_FV_V_F<"vfmadd", 0b101000>; defm VFNMADD_V : VALUr_FV_V_F<"vfnmadd", 0b101001>; defm VFMSUB_V : VALUr_FV_V_F<"vfmsub", 0b101010>; defm VFNMSUB_V : VALUr_FV_V_F<"vfnmsub", 0b101011>; // Vector Widening Floating-Point Fused Multiply-Add Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { defm VFWMACC_V : VALUr_FV_V_F<"vfwmacc", 0b111100>; defm VFWNMACC_V : VALUr_FV_V_F<"vfwnmacc", 0b111101>; defm VFWMSAC_V : VALUr_FV_V_F<"vfwmsac", 0b111110>; defm VFWNMSAC_V : VALUr_FV_V_F<"vfwnmsac", 0b111111>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV // Vector Floating-Point Square-Root Instruction defm VFSQRT_V : VALU_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>; // Vector Floating-Point MIN/MAX Instructions defm VFMIN_V : VALU_FV_V_F<"vfmin", 0b000100>; defm VFMAX_V : VALU_FV_V_F<"vfmax", 0b000110>; // Vector Floating-Point Sign-Injection Instructions defm VFSGNJ_V : VALU_FV_V_F<"vfsgnj", 0b001000>; defm VFSGNJN_V : VALU_FV_V_F<"vfsgnjn", 0b001001>; defm VFSGNJX_V : VALU_FV_V_F<"vfsgnjx", 0b001010>; // Vector Floating-Point Compare Instructions let RVVConstraint = NoConstraint in { defm VMFEQ_V : VALU_FV_V_F<"vmfeq", 0b011000>; defm VMFNE_V : VALU_FV_V_F<"vmfne", 0b011100>; defm VMFLT_V : VALU_FV_V_F<"vmflt", 0b011011>; defm VMFLE_V : VALU_FV_V_F<"vmfle", 0b011001>; defm VMFGT_V : VALU_FV_F<"vmfgt", 0b011101>; defm VMFGE_V : 
VALU_FV_F<"vmfge", 0b011111>; } // RVVConstraint = NoConstraint def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm", (VMFLT_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmfge.vv $vd, $va, $vb$vm", (VMFLE_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>; // Vector Floating-Point Classify Instruction defm VFCLASS_V : VALU_FV_VS2<"vfclass.v", 0b010011, 0b10000>; let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { // Vector Floating-Point Merge Instruction def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VRegOp:$vd), (ins VRegOp:$vs2, FPR32:$rs1, VMV0:$v0), "vfmerge.vfm", "$vd, $vs2, $rs1, v0"> { let vm = 0; } // Vector Floating-Point Move Instruction def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VRegOp:$vd), (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1"> { let vs2 = 0; let vm = 1; } } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 // Single-Width Floating-Point/Integer Type-Convert Instructions defm VFCVT_XU_F_V : VALU_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>; defm VFCVT_X_F_V : VALU_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>; defm VFCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>; defm VFCVT_RTZ_X_F_V : VALU_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>; defm VFCVT_F_XU_V : VALU_FV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>; defm VFCVT_F_X_V : VALU_FV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>; // Widening Floating-Point/Integer Type-Convert Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in { defm VFWCVT_XU_F_V : VALU_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>; defm VFWCVT_X_F_V : VALU_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>; defm VFWCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>; defm VFWCVT_RTZ_X_F_V : VALU_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>; defm VFWCVT_F_XU_V : VALU_FV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>; defm VFWCVT_F_X_V : VALU_FV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>; defm VFWCVT_F_F_V : VALU_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt // Narrowing Floating-Point/Integer Type-Convert Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = NarrowCvt in { defm VFNCVT_XU_F_W : VALU_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>; defm VFNCVT_X_F_W : VALU_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>; defm VFNCVT_RTZ_XU_F_W : VALU_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>; defm VFNCVT_RTZ_X_F_W : VALU_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>; defm VFNCVT_F_XU_W : VALU_FV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>; defm VFNCVT_F_X_W : VALU_FV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>; defm VFNCVT_F_F_W : VALU_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>; defm VFNCVT_ROD_F_F_W : VALU_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>; } // Constraints = "@earlyclobber $vd", RVVConstraint = NarrowCvt // Vector Single-Width Integer Reduction Instructions let RVVConstraint = NoConstraint in { defm VREDSUM : VALU_MV_V<"vredsum", 0b000000>; defm VREDMAXU : VALU_MV_V<"vredmaxu", 0b000110>; defm VREDMAX : VALU_MV_V<"vredmax", 0b000111>; defm VREDMINU : VALU_MV_V<"vredminu", 0b000100>; defm VREDMIN : VALU_MV_V<"vredmin", 0b000101>; defm VREDAND : VALU_MV_V<"vredand", 0b000001>; defm VREDOR : VALU_MV_V<"vredor", 0b000010>; defm VREDXOR : VALU_MV_V<"vredxor", 0b000011>; } // RVVConstraint = NoConstraint // Vector Widening Integer Reduction Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { // Set earlyclobber for following instructions for second and mask operands. 
// This has the downside that the earlyclobber constraint is too coarse and // will impose unnecessary restrictions by not allowing the destination to // overlap with the first (wide) operand. defm VWREDSUMU : VALU_IV_V<"vwredsumu", 0b110000>; defm VWREDSUM : VALU_IV_V<"vwredsum", 0b110001>; } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint // Vector Single-Width Floating-Point Reduction Instructions let RVVConstraint = NoConstraint in { defm VFREDOSUM : VALU_FV_V<"vfredosum", 0b000011>; defm VFREDSUM : VALU_FV_V<"vfredsum", 0b000001>; defm VFREDMAX : VALU_FV_V<"vfredmax", 0b000111>; defm VFREDMIN : VALU_FV_V<"vfredmin", 0b000101>; } // RVVConstraint = NoConstraint // Vector Widening Floating-Point Reduction Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { // Set earlyclobber for following instructions for second and mask operands. // This has the downside that the earlyclobber constraint is too coarse and // will impose unnecessary restrictions by not allowing the destination to // overlap with the first (wide) operand. defm VFWREDOSUM : VALU_FV_V<"vfwredosum", 0b110011>; defm VFWREDSUM : VALU_FV_V<"vfwredsum", 0b110001>; } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint // Vector Mask-Register Logical Instructions defm VMAND_M : VALU_MV_Mask<"vmand", 0b011001, "m">; defm VMNAND_M : VALU_MV_Mask<"vmnand", 0b011101, "m">; defm VMANDNOT_M : VALU_MV_Mask<"vmandnot", 0b011000, "m">; defm VMXOR_M : VALU_MV_Mask<"vmxor", 0b011011, "m">; defm VMOR_M : VALU_MV_Mask<"vmor", 0b011010, "m">; defm VMNOR_M : VALU_MV_Mask<"vmnor", 0b011110, "m">; defm VMORNOT_M : VALU_MV_Mask<"vmornot", 0b011100, "m">; defm VMXNOR_M : VALU_MV_Mask<"vmxnor", 0b011111, "m">; def : InstAlias<"vmmv.m $vd, $vs", (VMAND_MM VRegOp:$vd, VRegOp:$vs, VRegOp:$vs)>; def : InstAlias<"vmclr.m $vd", (VMXOR_MM VRegOp:$vd, VRegOp:$vd, VRegOp:$vd)>; def : InstAlias<"vmset.m $vd", (VMXNOR_MM VRegOp:$vd, VRegOp:$vd, VRegOp:$vd)>; def : InstAlias<"vmnot.m $vd, $vs", (VMNAND_MM VRegOp:$vd, VRegOp:$vs, VRegOp:$vs)>; let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { // Vector mask population count vpopc def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd), (ins VRegOp:$vs2, VMaskOp:$vm), "vpopc.m", "$vd, $vs2$vm">; // vfirst find-first-set mask bit def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd), (ins VRegOp:$vs2, VMaskOp:$vm), "vfirst.m", "$vd, $vs2$vm">; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 // vmsbf.m set-before-first mask bit defm VMSBF_M : VALU_MV_VS2<"vmsbf.m", 0b010100, 0b00001>; // vmsif.m set-including-first mask bit defm VMSIF_M : VALU_MV_VS2<"vmsif.m", 0b010100, 0b00011>; // vmsof.m set-only-first mask bit defm VMSOF_M : VALU_MV_VS2<"vmsof.m", 0b010100, 0b00010>; // Vector Iota Instruction let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in { defm VIOTA_M : VALU_MV_VS2<"viota.m", 0b010100, 0b10000>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Iota // Vector Element Index Instruction let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VRegOp:$vd), (ins VMaskOp:$vm), "vid.v", "$vd$vm"> { let vs2 = 0; } // Integer Scalar Move Instructions let vm = 1 in { def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd), (ins VRegOp:$vs2), "vmv.x.s", "$vd, $vs2">; def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VRegOp:$vd), (ins GPR:$rs1), "vmv.s.x", "$vd, $rs1">; } } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 let hasSideEffects = 
0, mayLoad = 0, mayStore = 0, vm = 1 in { // Floating-Point Scalar Move Instructions def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd), (ins VRegOp:$vs2), "vfmv.f.s", "$vd, $vs2">; def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VRegOp:$vd), (ins FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 // Vector Slide Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { defm VSLIDEUP_V : VALU_IV_X_I<"vslideup", 0b001110, uimm5>; } // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp defm VSLIDEDOWN_V : VALU_IV_X_I<"vslidedown", 0b001111, uimm5>; let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { defm VSLIDE1UP_V : VALU_MV_X<"vslide1up", 0b001110>; defm VFSLIDE1UP_V : VALU_FV_F<"vfslide1up", 0b001110>; } // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp defm VSLIDE1DOWN_V : VALU_MV_X<"vslide1down", 0b001111>; defm VFSLIDE1DOWN_V : VALU_FV_F<"vfslide1down", 0b001111>; // Vector Register Gather Instruction let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in { defm VRGATHER_V : VALU_IV_V_X_I<"vrgather", 0b001100, uimm5>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather // Vector Compress Instruction let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in { defm VCOMPRESS_V : VALU_MV_Mask<"vcompress", 0b010111>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { foreach nf = [1, 2, 4, 8] in { def VMV#nf#R_V : RVInstV<0b100111, !add(nf, -1), OPIVI, (outs VRegOp:$vd), (ins VRegOp:$vs2), "vmv" # nf # "r.v", "$vd, $vs2"> { let Uses = []; let vm = 1; } } } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 } // Predicates = [HasStdExtV] let Predicates = [HasStdExtZvlsseg] in { foreach nf=2-8 in { def VLSEG#nf#E8_V : VUnitStrideSegmentLoad; def VLSEG#nf#E16_V : VUnitStrideSegmentLoad; def VLSEG#nf#E32_V : VUnitStrideSegmentLoad; def VLSEG#nf#E64_V : VUnitStrideSegmentLoad; def VLSEG#nf#E128_V : VUnitStrideSegmentLoad; def VLSEG#nf#E256_V : VUnitStrideSegmentLoad; def VLSEG#nf#E512_V : VUnitStrideSegmentLoad; def VLSEG#nf#E1024_V : VUnitStrideSegmentLoad; def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E128FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E256FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E512FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E1024FF_V : VUnitStrideSegmentLoad; def VSSEG#nf#E8_V : VUnitStrideSegmentStore; def VSSEG#nf#E16_V : VUnitStrideSegmentStore; def VSSEG#nf#E32_V : VUnitStrideSegmentStore; def VSSEG#nf#E64_V : VUnitStrideSegmentStore; def VSSEG#nf#E128_V : VUnitStrideSegmentStore; def VSSEG#nf#E256_V : VUnitStrideSegmentStore; def VSSEG#nf#E512_V : VUnitStrideSegmentStore; def VSSEG#nf#E1024_V : VUnitStrideSegmentStore; // Vector Strided Instructions def VLSSEG#nf#E8_V : VStridedSegmentLoad; def VLSSEG#nf#E16_V : VStridedSegmentLoad; def VLSSEG#nf#E32_V : VStridedSegmentLoad; def VLSSEG#nf#E64_V : VStridedSegmentLoad; def VLSSEG#nf#E128_V : VStridedSegmentLoad; def VLSSEG#nf#E256_V : VStridedSegmentLoad; def VLSSEG#nf#E512_V : VStridedSegmentLoad; def VLSSEG#nf#E1024_V : VStridedSegmentLoad; def VSSSEG#nf#E8_V : VStridedSegmentStore; def VSSSEG#nf#E16_V : VStridedSegmentStore; def VSSSEG#nf#E32_V : VStridedSegmentStore; def VSSSEG#nf#E64_V : VStridedSegmentStore; def VSSSEG#nf#E128_V : 
VStridedSegmentStore; def VSSSEG#nf#E256_V : VStridedSegmentStore; def VSSSEG#nf#E512_V : VStridedSegmentStore; def VSSSEG#nf#E1024_V : VStridedSegmentStore; // Vector Indexed Instructions def VLXSEG#nf#EI8_V : VIndexedSegmentLoad; def VLXSEG#nf#EI16_V : VIndexedSegmentLoad; def VLXSEG#nf#EI32_V : VIndexedSegmentLoad; def VLXSEG#nf#EI64_V : VIndexedSegmentLoad; def VLXSEG#nf#EI128_V : VIndexedSegmentLoad; def VLXSEG#nf#EI256_V : VIndexedSegmentLoad; def VLXSEG#nf#EI512_V : VIndexedSegmentLoad; def VLXSEG#nf#EI1024_V : VIndexedSegmentLoad; def VSXSEG#nf#EI8_V : VIndexedSegmentStore; def VSXSEG#nf#EI16_V : VIndexedSegmentStore; def VSXSEG#nf#EI32_V : VIndexedSegmentStore; def VSXSEG#nf#EI64_V : VIndexedSegmentStore; def VSXSEG#nf#EI128_V : VIndexedSegmentStore; def VSXSEG#nf#EI256_V : VIndexedSegmentStore; def VSXSEG#nf#EI512_V : VIndexedSegmentStore; def VSXSEG#nf#EI1024_V : VIndexedSegmentStore; } } // Predicates = [HasStdExtZvlsseg] let Predicates = [HasStdExtZvamo, HasStdExtA] in { defm VAMOSWAPEI8 : VAMO; defm VAMOSWAPEI16 : VAMO; defm VAMOSWAPEI32 : VAMO; defm VAMOADDEI8 : VAMO; defm VAMOADDEI16 : VAMO; defm VAMOADDEI32 : VAMO; defm VAMOXOREI8 : VAMO; defm VAMOXOREI16 : VAMO; defm VAMOXOREI32 : VAMO; defm VAMOANDEI8 : VAMO; defm VAMOANDEI16 : VAMO; defm VAMOANDEI32 : VAMO; defm VAMOOREI8 : VAMO; defm VAMOOREI16 : VAMO; defm VAMOOREI32 : VAMO; defm VAMOMINEI8 : VAMO; defm VAMOMINEI16 : VAMO; defm VAMOMINEI32 : VAMO; defm VAMOMAXEI8 : VAMO; defm VAMOMAXEI16 : VAMO; defm VAMOMAXEI32 : VAMO; defm VAMOMINUEI8 : VAMO; defm VAMOMINUEI16 : VAMO; defm VAMOMINUEI32 : VAMO; defm VAMOMAXUEI8 : VAMO; defm VAMOMAXUEI16 : VAMO; defm VAMOMAXUEI32 : VAMO; } // Predicates = [HasStdExtZvamo, HasStdExtA] let Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] in { defm VAMOSWAPEI64 : VAMO; defm VAMOADDEI64 : VAMO; defm VAMOXOREI64 : VAMO; defm VAMOANDEI64 : VAMO; defm VAMOOREI64 : VAMO; defm VAMOMINEI64 : VAMO; defm VAMOMAXEI64 : VAMO; defm VAMOMINUEI64 : VAMO; defm VAMOMAXUEI64 : VAMO; } // Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td index 7544b4b3b845..2b44847104a0 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td @@ -1,334 +1,343 @@ //===-- RISCVRegisterInfo.td - RISC-V Register defs --------*- tablegen -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // Declarations that describe the RISC-V register files //===----------------------------------------------------------------------===// let Namespace = "RISCV" in { class RISCVReg Enc, string n, list alt = []> : Register { let HWEncoding{4-0} = Enc; let AltNames = alt; } class RISCVReg32 Enc, string n, list alt = []> : Register { let HWEncoding{4-0} = Enc; let AltNames = alt; } // Because RISCVReg64 register have AsmName and AltNames that alias with their // 32-bit sub-register, RISCVAsmParser will need to coerce a register number // from a RISCVReg32 to the equivalent RISCVReg64 when appropriate. 
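// e.g. (illustrative): "ft0" parses as the 32-bit F0_F, but in
// `fadd.d ft0, ft0, ft0` the operand must be coerced to the RISCVReg64
// super-register defined further below.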
def sub_32 : SubRegIndex<32>; class RISCVReg64 : Register<""> { let HWEncoding{4-0} = subreg.HWEncoding{4-0}; let SubRegs = [subreg]; let SubRegIndices = [sub_32]; let AsmName = subreg.AsmName; let AltNames = subreg.AltNames; } class RISCVRegWithSubRegs Enc, string n, list subregs, list alt = []> : RegisterWithSubRegs { let HWEncoding{4-0} = Enc; let AltNames = alt; } def ABIRegAltName : RegAltNameIndex; def sub_vrm2 : SubRegIndex<64, -1>; def sub_vrm2_hi : SubRegIndex<64, -1>; def sub_vrm4 : SubRegIndex<128, -1>; def sub_vrm4_hi : SubRegIndex<128, -1>; def sub_vrm8 : SubRegIndex<256, -1>; def sub_vrm8_hi : SubRegIndex<256, -1>; } // Namespace = "RISCV" // Integer registers // CostPerUse is set higher for registers that may not be compressible as they // are not part of GPRC, the most restrictive register class used by the // compressed instruction set. This will influence the greedy register // allocator to reduce the use of registers that can't be encoded in 16 bit // instructions. This affects register allocation even when compressed // instruction isn't targeted, we see no major negative codegen impact. let RegAltNameIndices = [ABIRegAltName] in { def X0 : RISCVReg<0, "x0", ["zero"]>, DwarfRegNum<[0]>; let CostPerUse = 1 in { def X1 : RISCVReg<1, "x1", ["ra"]>, DwarfRegNum<[1]>; def X2 : RISCVReg<2, "x2", ["sp"]>, DwarfRegNum<[2]>; def X3 : RISCVReg<3, "x3", ["gp"]>, DwarfRegNum<[3]>; def X4 : RISCVReg<4, "x4", ["tp"]>, DwarfRegNum<[4]>; def X5 : RISCVReg<5, "x5", ["t0"]>, DwarfRegNum<[5]>; def X6 : RISCVReg<6, "x6", ["t1"]>, DwarfRegNum<[6]>; def X7 : RISCVReg<7, "x7", ["t2"]>, DwarfRegNum<[7]>; } def X8 : RISCVReg<8, "x8", ["s0", "fp"]>, DwarfRegNum<[8]>; def X9 : RISCVReg<9, "x9", ["s1"]>, DwarfRegNum<[9]>; def X10 : RISCVReg<10,"x10", ["a0"]>, DwarfRegNum<[10]>; def X11 : RISCVReg<11,"x11", ["a1"]>, DwarfRegNum<[11]>; def X12 : RISCVReg<12,"x12", ["a2"]>, DwarfRegNum<[12]>; def X13 : RISCVReg<13,"x13", ["a3"]>, DwarfRegNum<[13]>; def X14 : RISCVReg<14,"x14", ["a4"]>, DwarfRegNum<[14]>; def X15 : RISCVReg<15,"x15", ["a5"]>, DwarfRegNum<[15]>; let CostPerUse = 1 in { def X16 : RISCVReg<16,"x16", ["a6"]>, DwarfRegNum<[16]>; def X17 : RISCVReg<17,"x17", ["a7"]>, DwarfRegNum<[17]>; def X18 : RISCVReg<18,"x18", ["s2"]>, DwarfRegNum<[18]>; def X19 : RISCVReg<19,"x19", ["s3"]>, DwarfRegNum<[19]>; def X20 : RISCVReg<20,"x20", ["s4"]>, DwarfRegNum<[20]>; def X21 : RISCVReg<21,"x21", ["s5"]>, DwarfRegNum<[21]>; def X22 : RISCVReg<22,"x22", ["s6"]>, DwarfRegNum<[22]>; def X23 : RISCVReg<23,"x23", ["s7"]>, DwarfRegNum<[23]>; def X24 : RISCVReg<24,"x24", ["s8"]>, DwarfRegNum<[24]>; def X25 : RISCVReg<25,"x25", ["s9"]>, DwarfRegNum<[25]>; def X26 : RISCVReg<26,"x26", ["s10"]>, DwarfRegNum<[26]>; def X27 : RISCVReg<27,"x27", ["s11"]>, DwarfRegNum<[27]>; def X28 : RISCVReg<28,"x28", ["t3"]>, DwarfRegNum<[28]>; def X29 : RISCVReg<29,"x29", ["t4"]>, DwarfRegNum<[29]>; def X30 : RISCVReg<30,"x30", ["t5"]>, DwarfRegNum<[30]>; def X31 : RISCVReg<31,"x31", ["t6"]>, DwarfRegNum<[31]>; } } def XLenVT : ValueTypeByHwMode<[RV32, RV64, DefaultMode], [i32, i64, i32]>; // The order of registers represents the preferred allocation sequence. // Registers are listed in the order caller-save, callee-save, specials. 
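// e.g. in GPR below, a0-a7 and t0-t6 are tried before s0-s11, with
// zero/ra/sp/gp/tp allocated last.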
def GPR : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 5, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27),
    (sequence "X%u", 0, 4)
  )> {
  let RegInfos = RegInfoByHwMode<
      [RV32,              RV64,              DefaultMode],
      [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
}

def GPRX0 : RegisterClass<"RISCV", [XLenVT], 32, (add X0)> {
  let RegInfos = RegInfoByHwMode<
      [RV32,              RV64,              DefaultMode],
      [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
}

// The order of registers represents the preferred allocation sequence.
// Registers are listed in the order caller-save, callee-save, specials.
def GPRNoX0 : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 5, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27),
    (sequence "X%u", 1, 4)
  )> {
  let RegInfos = RegInfoByHwMode<
      [RV32,              RV64,              DefaultMode],
      [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
}

def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 5, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27),
    X1, X3, X4
  )> {
  let RegInfos = RegInfoByHwMode<
      [RV32,              RV64,              DefaultMode],
      [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
}

def GPRC : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 15),
    (sequence "X%u", 8, 9)
  )> {
  let RegInfos = RegInfoByHwMode<
      [RV32,              RV64,              DefaultMode],
      [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
}

// For indirect tail calls, we can't use callee-saved registers, as they are
// restored to the saved value before the tail call, which would clobber a call
// address.
def GPRTC : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 5, 7),
    (sequence "X%u", 10, 17),
    (sequence "X%u", 28, 31)
  )> {
  let RegInfos = RegInfoByHwMode<
      [RV32,              RV64,              DefaultMode],
      [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
}

def SP : RegisterClass<"RISCV", [XLenVT], 32, (add X2)> {
  let RegInfos = RegInfoByHwMode<
      [RV32,              RV64,              DefaultMode],
      [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
}

// Floating point registers
let RegAltNameIndices = [ABIRegAltName] in {
  def F0_F  : RISCVReg32<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
  def F1_F  : RISCVReg32<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
  def F2_F  : RISCVReg32<2, "f2", ["ft2"]>, DwarfRegNum<[34]>;
  def F3_F  : RISCVReg32<3, "f3", ["ft3"]>, DwarfRegNum<[35]>;
  def F4_F  : RISCVReg32<4, "f4", ["ft4"]>, DwarfRegNum<[36]>;
  def F5_F  : RISCVReg32<5, "f5", ["ft5"]>, DwarfRegNum<[37]>;
  def F6_F  : RISCVReg32<6, "f6", ["ft6"]>, DwarfRegNum<[38]>;
  def F7_F  : RISCVReg32<7, "f7", ["ft7"]>, DwarfRegNum<[39]>;
  def F8_F  : RISCVReg32<8, "f8", ["fs0"]>, DwarfRegNum<[40]>;
  def F9_F  : RISCVReg32<9, "f9", ["fs1"]>, DwarfRegNum<[41]>;
  def F10_F : RISCVReg32<10,"f10", ["fa0"]>, DwarfRegNum<[42]>;
  def F11_F : RISCVReg32<11,"f11", ["fa1"]>, DwarfRegNum<[43]>;
  def F12_F : RISCVReg32<12,"f12", ["fa2"]>, DwarfRegNum<[44]>;
  def F13_F : RISCVReg32<13,"f13", ["fa3"]>, DwarfRegNum<[45]>;
  def F14_F : RISCVReg32<14,"f14", ["fa4"]>, DwarfRegNum<[46]>;
  def F15_F : RISCVReg32<15,"f15", ["fa5"]>, DwarfRegNum<[47]>;
  def F16_F : RISCVReg32<16,"f16", ["fa6"]>, DwarfRegNum<[48]>;
  def F17_F : RISCVReg32<17,"f17", ["fa7"]>, DwarfRegNum<[49]>;
  def F18_F : RISCVReg32<18,"f18", ["fs2"]>, DwarfRegNum<[50]>;
  def F19_F : RISCVReg32<19,"f19", ["fs3"]>, DwarfRegNum<[51]>;
  def F20_F : RISCVReg32<20,"f20", ["fs4"]>, DwarfRegNum<[52]>;
  def F21_F : RISCVReg32<21,"f21", ["fs5"]>, DwarfRegNum<[53]>;
  def F22_F : RISCVReg32<22,"f22", ["fs6"]>, DwarfRegNum<[54]>;
  def F23_F : RISCVReg32<23,"f23", ["fs7"]>, DwarfRegNum<[55]>;
  def F24_F : RISCVReg32<24,"f24", ["fs8"]>, DwarfRegNum<[56]>;
  def F25_F : RISCVReg32<25,"f25", ["fs9"]>, DwarfRegNum<[57]>;
  def F26_F : RISCVReg32<26,"f26", ["fs10"]>, DwarfRegNum<[58]>;
  def F27_F : RISCVReg32<27,"f27", ["fs11"]>, DwarfRegNum<[59]>;
  def F28_F : RISCVReg32<28,"f28", ["ft8"]>, DwarfRegNum<[60]>;
  def F29_F : RISCVReg32<29,"f29", ["ft9"]>, DwarfRegNum<[61]>;
  def F30_F : RISCVReg32<30,"f30", ["ft10"]>, DwarfRegNum<[62]>;
  def F31_F : RISCVReg32<31,"f31", ["ft11"]>, DwarfRegNum<[63]>;

  foreach Index = 0-31 in {
    def F#Index#_D : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_F")>,
                     DwarfRegNum<[!add(Index, 32)]>;
  }
}

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
def FPR32 : RegisterClass<"RISCV", [f32], 32, (add
    (sequence "F%u_F", 0, 7),
    (sequence "F%u_F", 10, 17),
    (sequence "F%u_F", 28, 31),
    (sequence "F%u_F", 8, 9),
    (sequence "F%u_F", 18, 27)
)>;

def FPR32C : RegisterClass<"RISCV", [f32], 32, (add
    (sequence "F%u_F", 10, 15),
    (sequence "F%u_F", 8, 9)
)>;

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
def FPR64 : RegisterClass<"RISCV", [f64], 64, (add
    (sequence "F%u_D", 0, 7),
    (sequence "F%u_D", 10, 17),
    (sequence "F%u_D", 28, 31),
    (sequence "F%u_D", 8, 9),
    (sequence "F%u_D", 18, 27)
)>;

def FPR64C : RegisterClass<"RISCV", [f64], 64, (add
    (sequence "F%u_D", 10, 15),
    (sequence "F%u_D", 8, 9)
)>;

// Vector registers
let RegAltNameIndices = [ABIRegAltName] in {
  foreach Index = 0-31 in {
    def V#Index : RISCVReg<Index, "v"#Index, ["v"#Index]>,
                  DwarfRegNum<[!add(Index, 64)]>;
  }

  foreach Index = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22,
                   24, 26, 28, 30] in {
    def V#Index#M2 : RISCVRegWithSubRegs<Index, "v"#Index,
                       [!cast<Register>("V"#Index),
                        !cast<Register>("V"#!add(Index, 1))],
                       ["v"#Index]>,
                     DwarfRegAlias<!cast<Register>("V"#Index)> {
      let SubRegIndices = [sub_vrm2, sub_vrm2_hi];
    }
  }

  foreach Index = [0, 4, 8, 12, 16, 20, 24, 28] in {
    def V#Index#M4 : RISCVRegWithSubRegs<Index, "v"#Index,
                       [!cast<Register>("V"#Index#"M2"),
                        !cast<Register>("V"#!add(Index, 2)#"M2")],
                       ["v"#Index]>,
                     DwarfRegAlias<!cast<Register>("V"#Index)> {
      let SubRegIndices = [sub_vrm4, sub_vrm4_hi];
    }
  }

  foreach Index = [0, 8, 16, 24] in {
    def V#Index#M8 : RISCVRegWithSubRegs<Index, "v"#Index,
                       [!cast<Register>("V"#Index#"M4"),
                        !cast<Register>("V"#!add(Index, 4)#"M4")],
                       ["v"#Index]>,
                     DwarfRegAlias<!cast<Register>("V"#Index)> {
      let SubRegIndices = [sub_vrm8, sub_vrm8_hi];
    }
  }

  def VTYPE : RISCVReg<0, "vtype", ["vtype"]>;
  def VL : RISCVReg<0, "vl", ["vl"]>;
}

class RegisterTypes<list<ValueType> reg_types> {
  list<ValueType> types = reg_types;
}

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
def VR : RegisterClass<"RISCV", [nxv8i8, nxv4i16, nxv2i32, nxv1i64],
-                      64, (add
+                      64, (add
    (sequence "V%u", 25, 31),
    (sequence "V%u", 8, 24),
    (sequence "V%u", 0, 7)
  )> {
  let Size = 64;
}

+def VRNoV0 : RegisterClass<"RISCV", [nxv8i8, nxv4i16, nxv2i32, nxv1i64],
+                           64, (add
+    (sequence "V%u", 25, 31),
+    (sequence "V%u", 8, 24),
+    (sequence "V%u", 1, 7)
+  )> {
+  let Size = 64;
+}
+
def VRM2 : RegisterClass<"RISCV", [nxv16i8, nxv8i16, nxv4i32, nxv2i64], 64,
           (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2,
                V18M2, V20M2, V22M2, V24M2, V0M2, V2M2, V4M2, V6M2)> {
  let Size = 128;
}

def VRM4 : RegisterClass<"RISCV", [nxv32i8, nxv16i16, nxv8i32, nxv4i64], 64,
           (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V0M4, V4M4)> {
  let Size = 256;
}

def VRM8 : RegisterClass<"RISCV", [nxv32i16, nxv16i32, nxv8i64], 64,
           (add V8M8, V16M8, V24M8, V0M8)> {
  let Size = 512;
}

def VMaskVT : RegisterTypes<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, nxv32i1]>;

def VM : RegisterClass<"RISCV", VMaskVT.types, 64, (add
    (sequence "V%u", 25, 31),
    (sequence "V%u", 8, 24),
    (sequence "V%u", 0, 7))> {
  let Size = 64;
}

def VMV0 : RegisterClass<"RISCV", VMaskVT.types, 64, (add V0)> {
  let Size = 64;
}

diff --git a/llvm/test/MC/RISCV/rvv/compare.s b/llvm/test/MC/RISCV/rvv/compare.s
index f93aeac1796a..6b5eb213d0ec 100644
--- a/llvm/test/MC/RISCV/rvv/compare.s
+++ b/llvm/test/MC/RISCV/rvv/compare.s
@@ -1,351 +1,407 @@
 # RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
-# RUN:   | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN:   | FileCheck %s --check-prefixes=CHECK-ENCODING
 # RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
 # RUN:   | FileCheck %s --check-prefix=CHECK-ERROR
 # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
 # RUN:   | llvm-objdump -d --mattr=+experimental-v - \
 # RUN:   | FileCheck %s --check-prefix=CHECK-INST
 # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
 # RUN:   | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN

vmslt.vv v0, v4, v20, v0.t
# CHECK-INST: vmslt.vv v0, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x00,0x4a,0x6c]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 00 4a 6c

vmseq.vv v8, v4, v20, v0.t
# CHECK-INST: vmseq.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x04,0x4a,0x60]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 04 4a 60

vmseq.vv v8, v4, v20
# CHECK-INST: vmseq.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x04,0x4a,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 04 4a 62

vmseq.vx v8, v4, a0, v0.t
# CHECK-INST: vmseq.vx v8, v4, a0, v0.t
# CHECK-ENCODING: [0x57,0x44,0x45,0x60]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 44 45 60

vmseq.vx v8, v4, a0
# CHECK-INST: vmseq.vx v8, v4, a0
# CHECK-ENCODING: [0x57,0x44,0x45,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 44 45 62

vmseq.vi v8, v4, 15, v0.t
# CHECK-INST: vmseq.vi v8, v4, 15, v0.t
# CHECK-ENCODING: [0x57,0xb4,0x47,0x60]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 b4 47 60

vmseq.vi v8, v4, 15
# CHECK-INST: vmseq.vi v8, v4, 15
# CHECK-ENCODING: [0x57,0xb4,0x47,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 b4 47 62

vmsne.vv v8, v4, v20, v0.t
# CHECK-INST: vmsne.vv v8, v4, v20, v0.t
#
CHECK-ENCODING: [0x57,0x04,0x4a,0x64] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 64 vmsne.vv v8, v4, v20 # CHECK-INST: vmsne.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x04,0x4a,0x66] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 66 vmsne.vx v8, v4, a0, v0.t # CHECK-INST: vmsne.vx v8, v4, a0, v0.t # CHECK-ENCODING: [0x57,0x44,0x45,0x64] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 64 vmsne.vx v8, v4, a0 # CHECK-INST: vmsne.vx v8, v4, a0 # CHECK-ENCODING: [0x57,0x44,0x45,0x66] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 66 vmsne.vi v8, v4, 15, v0.t # CHECK-INST: vmsne.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x64] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 64 vmsne.vi v8, v4, 15 # CHECK-INST: vmsne.vi v8, v4, 15 # CHECK-ENCODING: [0x57,0xb4,0x47,0x66] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 66 vmsltu.vv v8, v4, v20, v0.t # CHECK-INST: vmsltu.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x04,0x4a,0x68] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 68 vmsltu.vv v8, v4, v20 # CHECK-INST: vmsltu.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x04,0x4a,0x6a] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 6a vmsltu.vx v8, v4, a0, v0.t # CHECK-INST: vmsltu.vx v8, v4, a0, v0.t # CHECK-ENCODING: [0x57,0x44,0x45,0x68] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 68 vmsltu.vx v8, v4, a0 # CHECK-INST: vmsltu.vx v8, v4, a0 # CHECK-ENCODING: [0x57,0x44,0x45,0x6a] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 6a vmslt.vv v8, v4, v20, v0.t # CHECK-INST: vmslt.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x04,0x4a,0x6c] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 6c vmslt.vv v8, v4, v20 # CHECK-INST: vmslt.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x04,0x4a,0x6e] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 6e vmslt.vx v8, v4, a0, v0.t # CHECK-INST: vmslt.vx v8, v4, a0, v0.t # CHECK-ENCODING: [0x57,0x44,0x45,0x6c] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 6c vmslt.vx v8, v4, a0 # CHECK-INST: vmslt.vx v8, v4, a0 # CHECK-ENCODING: [0x57,0x44,0x45,0x6e] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 6e vmsleu.vv v8, v4, v20, v0.t # CHECK-INST: vmsleu.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x04,0x4a,0x70] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 70 vmsleu.vv v8, v4, v20 # CHECK-INST: vmsleu.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x04,0x4a,0x72] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 72 vmsleu.vx v8, v4, a0, v0.t # CHECK-INST: vmsleu.vx v8, v4, a0, v0.t # CHECK-ENCODING: [0x57,0x44,0x45,0x70] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 70 vmsleu.vx v8, v4, a0 # CHECK-INST: vmsleu.vx v8, v4, a0 # CHECK-ENCODING: 
[0x57,0x44,0x45,0x72] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 72 vmsleu.vi v8, v4, 15, v0.t # CHECK-INST: vmsleu.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x70] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 70 vmsleu.vi v8, v4, 15 # CHECK-INST: vmsleu.vi v8, v4, 15 # CHECK-ENCODING: [0x57,0xb4,0x47,0x72] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 72 vmsle.vv v8, v4, v20, v0.t # CHECK-INST: vmsle.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x04,0x4a,0x74] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 74 vmsle.vv v8, v4, v20 # CHECK-INST: vmsle.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x04,0x4a,0x76] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 76 vmsle.vx v8, v4, a0, v0.t # CHECK-INST: vmsle.vx v8, v4, a0, v0.t # CHECK-ENCODING: [0x57,0x44,0x45,0x74] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 74 vmsle.vx v8, v4, a0 # CHECK-INST: vmsle.vx v8, v4, a0 # CHECK-ENCODING: [0x57,0x44,0x45,0x76] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 76 vmsle.vi v8, v4, 15, v0.t # CHECK-INST: vmsle.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x74] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 74 vmsle.vi v8, v4, 15 # CHECK-INST: vmsle.vi v8, v4, 15 # CHECK-ENCODING: [0x57,0xb4,0x47,0x76] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 76 vmsgtu.vx v8, v4, a0, v0.t # CHECK-INST: vmsgtu.vx v8, v4, a0, v0.t # CHECK-ENCODING: [0x57,0x44,0x45,0x78] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 78 vmsgtu.vx v8, v4, a0 # CHECK-INST: vmsgtu.vx v8, v4, a0 # CHECK-ENCODING: [0x57,0x44,0x45,0x7a] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 7a vmsgtu.vi v8, v4, 15, v0.t # CHECK-INST: vmsgtu.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x78] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 78 vmsgtu.vi v8, v4, 15 # CHECK-INST: vmsgtu.vi v8, v4, 15 # CHECK-ENCODING: [0x57,0xb4,0x47,0x7a] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 7a vmsgt.vx v8, v4, a0, v0.t # CHECK-INST: vmsgt.vx v8, v4, a0, v0.t # CHECK-ENCODING: [0x57,0x44,0x45,0x7c] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 7c vmsgt.vx v8, v4, a0 # CHECK-INST: vmsgt.vx v8, v4, a0 # CHECK-ENCODING: [0x57,0x44,0x45,0x7e] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 44 45 7e vmsgt.vi v8, v4, 15, v0.t # CHECK-INST: vmsgt.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x7c] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 7c vmsgt.vi v8, v4, 15 # CHECK-INST: vmsgt.vi v8, v4, 15 # CHECK-ENCODING: [0x57,0xb4,0x47,0x7e] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 7e vmsgtu.vv v8, v20, v4, v0.t # CHECK-INST: vmsltu.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x04,0x4a,0x68] # 
CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 68 vmsgtu.vv v8, v20, v4 # CHECK-INST: vmsltu.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x04,0x4a,0x6a] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 6a vmsgt.vv v8, v20, v4, v0.t # CHECK-INST: vmslt.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x04,0x4a,0x6c] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 6c vmsgt.vv v8, v20, v4 # CHECK-INST: vmslt.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x04,0x4a,0x6e] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 6e vmsgeu.vv v8, v20, v4, v0.t # CHECK-INST: vmsleu.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x04,0x4a,0x70] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 70 vmsgeu.vv v8, v20, v4 # CHECK-INST: vmsleu.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x04,0x4a,0x72] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 72 vmsge.vv v8, v20, v4, v0.t # CHECK-INST: vmsle.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x04,0x4a,0x74] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 74 vmsge.vv v8, v20, v4 # CHECK-INST: vmsle.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x04,0x4a,0x76] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 04 4a 76 vmsltu.vi v8, v4, 16, v0.t # CHECK-INST: vmsleu.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x70] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 70 vmsltu.vi v8, v4, 16 # CHECK-INST: vmsleu.vi v8, v4, 15 # CHECK-ENCODING: [0x57,0xb4,0x47,0x72] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 72 vmslt.vi v8, v4, 16, v0.t # CHECK-INST: vmsle.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x74] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 74 vmslt.vi v8, v4, 16 # CHECK-INST: vmsle.vi v8, v4, 15 # CHECK-ENCODING: [0x57,0xb4,0x47,0x76] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 76 vmsgeu.vi v8, v4, 16, v0.t # CHECK-INST: vmsgtu.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x78] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 78 vmsgeu.vi v8, v4, 16 # CHECK-INST: vmsgtu.vi v8, v4, 15 # CHECK-ENCODING: [0x57,0xb4,0x47,0x7a] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 7a vmsge.vi v8, v4, 16, v0.t # CHECK-INST: vmsgt.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x7c] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 7c vmsge.vi v8, v4, 16 # CHECK-INST: vmsgt.vi v8, v4, 15 # CHECK-ENCODING: [0x57,0xb4,0x47,0x7e] # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 7e + +vmsgeu.vx v8, v4, a0 +# CHECK-INST: vmsltu.vx v8, v4, a0 +# CHECK-INST: vmnot.m v8, v8 +# CHECK-ENCODING: [0x57,0x44,0x45,0x6a,0x57,0x24,0x84,0x76] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 44 45 6a +# CHECK-UNKNOWN: 57 24 84 76 + +vmsge.vx v0, v4, a0 +# CHECK-INST: vmslt.vx 
v0, v4, a0 +# CHECK-INST: vmnot.m v0, v0 +# CHECK-ENCODING: [0x57,0x40,0x45,0x6e,0x57,0x20,0x00,0x76] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 40 45 6e +# CHECK-UNKNOWN: 57 20 00 76 + +vmsge.vx v8, v4, a0 +# CHECK-INST: vmslt.vx v8, v4, a0 +# CHECK-INST: vmnot.m v8, v8 +# CHECK-ENCODING: [0x57,0x44,0x45,0x6e,0x57,0x24,0x84,0x76] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 44 45 6e +# CHECK-UNKNOWN: 57 24 84 76 + +vmsgeu.vx v8, v4, a0, v0.t +# CHECK-INST: vmsltu.vx v8, v4, a0, v0.t +# CHECK-INST: vmxor.mm v8, v8, v0 +# CHECK-ENCODING: [0x57,0x44,0x45,0x68,0x57,0x24,0x80,0x6e] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 44 45 68 +# CHECK-UNKNOWN: 57 24 80 6e + +vmsge.vx v8, v4, a0, v0.t +# CHECK-INST: vmslt.vx v8, v4, a0, v0.t +# CHECK-INST: vmxor.mm v8, v8, v0 +# CHECK-ENCODING: [0x57,0x44,0x45,0x6c,0x57,0x24,0x80,0x6e] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 44 45 6c +# CHECK-UNKNOWN: 57 24 80 6e + +vmsgeu.vx v0, v4, a0, v0.t, v2 +# CHECK-INST: vmsltu.vx v2, v4, a0, v0.t +# CHECK-INST: vmandnot.mm v0, v0, v2 +# CHECK-ENCODING: [0x57,0x41,0x45,0x68,0x57,0x20,0x01,0x62] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 41 45 68 +# CHECK-UNKNOWN: 57 20 01 62 + +vmsge.vx v0, v4, a0, v0.t, v2 +# CHECK-INST: vmslt.vx v2, v4, a0, v0.t +# CHECK-INST: vmandnot.mm v0, v0, v2 +# CHECK-ENCODING: [0x57,0x41,0x45,0x6c,0x57,0x20,0x01,0x62] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 41 45 6c +# CHECK-UNKNOWN: 57 20 01 62 diff --git a/llvm/test/MC/RISCV/rvv/invalid.s b/llvm/test/MC/RISCV/rvv/invalid.s index 615dc08ad67c..79b4ea62f665 100644 --- a/llvm/test/MC/RISCV/rvv/invalid.s +++ b/llvm/test/MC/RISCV/rvv/invalid.s @@ -1,592 +1,600 @@ # RUN: not llvm-mc -triple=riscv64 --mattr=+experimental-v --mattr=+f %s 2>&1 \ # RUN: | FileCheck %s --check-prefix=CHECK-ERROR vsetvli a2, a0, e31 # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vsetvli a2, a0, e32,m3 # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vsetvli a2, a0, m1,e32 # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vsetvli a2, a0, e32,m16 # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vsetvli a2, a0, e2048,m8 # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vsetvli a2, a0, e1,m8 # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vsetvli a2, a0, e8,m1,tx # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vsetvli a2, a0, e8,m1,ta,mx # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vsetvli a2, a0, e8,m1,ma # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vsetvli a2, a0, e8,m1,mu # CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu] vadd.vv v1, v3, v2, v4.t # CHECK-ERROR: operand must be v0.t vadd.vv v1, v3, v2, v0 # CHECK-ERROR: expected '.t' suffix vmslt.vi v1, v2, -16 # CHECK-ERROR: immediate must be in the range [-15, 16] vmslt.vi v1, v2, 17 # 
CHECK-ERROR: immediate must be in the range [-15, 16] viota.m v0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: viota.m v0, v2, v0.t viota.m v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: viota.m v2, v2 vfwcvt.xu.f.v v0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwcvt.xu.f.v v0, v2, v0.t vfwcvt.xu.f.v v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwcvt.xu.f.v v2, v2 vfwcvt.x.f.v v0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwcvt.x.f.v v0, v2, v0.t vfwcvt.x.f.v v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwcvt.x.f.v v2, v2 vfwcvt.f.xu.v v0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwcvt.f.xu.v v0, v2, v0.t vfwcvt.f.xu.v v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwcvt.f.xu.v v2, v2 vfwcvt.f.x.v v0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwcvt.f.x.v v0, v2, v0.t vfwcvt.f.x.v v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwcvt.f.x.v v2, v2 vfwcvt.f.f.v v0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwcvt.f.f.v v0, v2, v0.t vfwcvt.f.f.v v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwcvt.f.f.v v2, v2 vslideup.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vslideup.vx v0, v2, a0, v0.t vslideup.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vslideup.vx v2, v2, a0 vslideup.vi v0, v2, 31, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vslideup.vi v0, v2, 31, v0.t vslideup.vi v2, v2, 31 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vslideup.vi v2, v2, 31 vslide1up.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vslide1up.vx v0, v2, a0, v0.t vslide1up.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vslide1up.vx v2, v2, a0 vnsrl.wv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnsrl.wv v2, v2, v4 vnsrl.wx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnsrl.wx v2, v2, a0 vnsrl.wi v2, v2, 31 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. 
# CHECK-ERROR-LABEL: vnsrl.wi v2, v2, 31 vnsra.wv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnsra.wv v2, v2, v4 vnsra.wx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnsra.wx v2, v2, a0 vnsra.wi v2, v2, 31 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnsra.wi v2, v2, 31 vnclipu.wv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnclipu.wv v2, v2, v4 vnclipu.wx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnclipu.wx v2, v2, a0 vnclipu.wi v2, v2, 31 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnclipu.wi v2, v2, 31 vnclip.wv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnclip.wv v2, v2, v4 vnclip.wx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnclip.wx v2, v2, a0 vnclip.wi v2, v2, 31 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vnclip.wi v2, v2, 31 vfncvt.xu.f.w v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfncvt.xu.f.w v2, v2 vfncvt.x.f.w v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfncvt.x.f.w v2, v2 vfncvt.f.xu.w v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfncvt.f.xu.w v2, v2 vfncvt.f.x.w v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfncvt.f.x.w v2, v2 vfncvt.f.f.w v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfncvt.f.f.w v2, v2 vfncvt.rod.f.f.w v2, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfncvt.rod.f.f.w v2, v2 vrgather.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vrgather.vv v0, v2, v4, v0.t vrgather.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vrgather.vv v2, v2, v4 vrgather.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vrgather.vx v0, v2, a0, v0.t vrgather.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vrgather.vx v2, v2, a0 vrgather.vi v0, v2, 31, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vrgather.vi v0, v2, 31, v0.t vrgather.vi v2, v2, 31 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. 
# CHECK-ERROR-LABEL: vrgather.vi v2, v2, 31 vwaddu.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwaddu.vv v0, v2, v4, v0.t vwaddu.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwaddu.vv v2, v2, v4 vwsubu.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwsubu.vv v0, v2, v4, v0.t vwsubu.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwsubu.vv v2, v2, v4 vwadd.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwadd.vv v0, v2, v4, v0.t vwadd.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwadd.vv v2, v2, v4 vwsub.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwsub.vv v0, v2, v4, v0.t vwsub.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwsub.vv v2, v2, v4 vwmul.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmul.vv v0, v2, v4, v0.t vwmul.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmul.vv v2, v2, v4 vwmulu.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmulu.vv v0, v2, v4, v0.t vwmulu.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmulu.vv v2, v2, v4 vwmulsu.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmulsu.vv v0, v2, v4, v0.t vwmulsu.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmulsu.vv v2, v2, v4 vwmaccu.vv v0, v4, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmaccu.vv v0, v4, v2, v0.t vwmaccu.vv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmaccu.vv v2, v4, v2 vwmacc.vv v0, v4, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmacc.vv v0, v4, v2, v0.t vwmacc.vv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmacc.vv v2, v4, v2 vwmaccsu.vv v0, v4, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmaccsu.vv v0, v4, v2, v0.t vwmaccsu.vv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmaccsu.vv v2, v4, v2 vfwadd.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. 
# CHECK-ERROR-LABEL: vfwadd.vv v0, v2, v4, v0.t vfwadd.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwadd.vv v2, v2, v4 vfwsub.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwsub.vv v0, v2, v4, v0.t vfwsub.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwsub.vv v2, v2, v4 vfwmul.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwmul.vv v0, v2, v4, v0.t vfwmul.vv v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwmul.vv v2, v2, v4 vfwmacc.vv v0, v4, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwmacc.vv v0, v4, v2, v0.t vfwmacc.vv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwmacc.vv v2, v4, v2 vfwnmacc.vv v0, v4, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwnmacc.vv v0, v4, v2, v0.t vfwnmacc.vv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwnmacc.vv v2, v4, v2 vfwmsac.vv v0, v4, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwmsac.vv v0, v4, v2, v0.t vfwmsac.vv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwmsac.vv v2, v4, v2 vfwnmsac.vv v0, v4, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwnmsac.vv v0, v4, v2, v0.t vfwnmsac.vv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwnmsac.vv v2, v4, v2 vwaddu.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwaddu.vx v0, v2, a0, v0.t vwaddu.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwaddu.vx v2, v2, a0 vwsubu.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwsubu.vx v0, v2, a0, v0.t vwsubu.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwsubu.vx v2, v2, a0 vwadd.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwadd.vx v0, v2, a0, v0.t vwadd.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwadd.vx v2, v2, a0 vwsub.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwsub.vx v0, v2, a0, v0.t vwsub.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. 
# CHECK-ERROR-LABEL: vwsub.vx v2, v2, a0 vwmul.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmul.vx v0, v2, a0, v0.t vwmul.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmul.vx v2, v2, a0 vwmulu.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmulu.vx v0, v2, a0, v0.t vwmulu.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmulu.vx v2, v2, a0 vwmulsu.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmulsu.vx v0, v2, a0, v0.t vwmulsu.vx v2, v2, a0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmulsu.vx v2, v2, a0 vwmaccu.vx v0, a0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmaccu.vx v0, a0, v2, v0.t vwmaccu.vx v2, a0, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmaccu.vx v2, a0, v2 vwmacc.vx v0, a0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmacc.vx v0, a0, v2, v0.t vwmacc.vx v2, a0, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmacc.vx v2, a0, v2 vwmaccsu.vx v0, a0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmaccsu.vx v0, a0, v2, v0.t vwmaccsu.vx v2, a0, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmaccsu.vx v2, a0, v2 vwmaccus.vx v0, a0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwmaccus.vx v0, a0, v2, v0.t vwmaccus.vx v2, a0, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwmaccus.vx v2, a0, v2 vfwadd.vf v0, v2, fa0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwadd.vf v0, v2, fa0, v0.t vfwadd.vf v2, v2, fa0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwadd.vf v2, v2, fa0 vfwsub.vf v0, v2, fa0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwsub.vf v0, v2, fa0, v0.t vfwsub.vf v2, v2, fa0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwsub.vf v2, v2, fa0 vfwmul.vf v0, v2, fa0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwmul.vf v0, v2, fa0, v0.t vfwmul.vf v2, v2, fa0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwmul.vf v2, v2, fa0 vfwmacc.vf v0, fa0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. 
# CHECK-ERROR-LABEL: vfwmacc.vf v0, fa0, v2, v0.t vfwmacc.vf v2, fa0, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwmacc.vf v2, fa0, v2 vfwnmacc.vf v0, fa0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwnmacc.vf v0, fa0, v2, v0.t vfwnmacc.vf v2, fa0, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwnmacc.vf v2, fa0, v2 vfwmsac.vf v0, fa0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwmsac.vf v0, fa0, v2, v0.t vfwmsac.vf v2, fa0, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwmsac.vf v2, fa0, v2 vfwnmsac.vf v0, fa0, v2, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwnmsac.vf v0, fa0, v2, v0.t vfwnmsac.vf v2, fa0, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwnmsac.vf v2, fa0, v2 vcompress.vm v2, v2, v4 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vcompress.vm v2, v2, v4 vwaddu.wv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwaddu.wv v0, v2, v4, v0.t vwaddu.wv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwaddu.wv v2, v4, v2 vwsubu.wv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwsubu.wv v0, v2, v4, v0.t vwsubu.wv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwsubu.wv v2, v4, v2 vwadd.wv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwadd.wv v0, v2, v4, v0.t vwadd.wv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwadd.wv v2, v4, v2 vwsub.wv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwsub.wv v0, v2, v4, v0.t vwsub.wv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vwsub.wv v2, v4, v2 vfwadd.wv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwadd.wv v0, v2, v4, v0.t vfwadd.wv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwadd.wv v2, v4, v2 vfwsub.wv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwsub.wv v0, v2, v4, v0.t vfwsub.wv v2, v4, v2 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vfwsub.wv v2, v4, v2 vwaddu.wx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. 
# CHECK-ERROR-LABEL: vwaddu.wx v0, v2, a0, v0.t vwsubu.wx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwsubu.wx v0, v2, a0, v0.t vwadd.wx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwadd.wx v0, v2, a0, v0.t vwsub.wx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vwsub.wx v0, v2, a0, v0.t vfwadd.wf v0, v2, fa0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwadd.wf v0, v2, fa0, v0.t vfwsub.wf v0, v2, fa0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vfwsub.wf v0, v2, fa0, v0.t vadc.vvm v0, v2, v4, v0 # CHECK-ERROR: The destination vector register group cannot be V0. # CHECK-ERROR-LABEL: vadc.vvm v0, v2, v4, v0 vmadc.vvm v2, v2, v4, v0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vmadc.vvm v2, v2, v4, v0 vmadc.vvm v4, v2, v4, v0 # CHECK-ERROR: The destination vector register group cannot overlap the source vector register group. # CHECK-ERROR-LABEL: vmadc.vvm v4, v2, v4, v0 vadd.vv v0, v2, v4, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vadd.vv v0, v2, v4, v0.t vadd.vx v0, v2, a0, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vadd.vx v0, v2, a0, v0.t vadd.vi v0, v2, 1, v0.t # CHECK-ERROR: The destination vector register group cannot overlap the mask register. # CHECK-ERROR-LABEL: vadd.vi v0, v2, 1, v0.t + +vmsge.vx v0, v4, a0, v0.t +# CHECK-ERROR: too few operands for instruction +# CHECK-ERROR-LABEL: vmsge.vx v0, v4, a0, v0.t + +vmsge.vx v8, v4, a0, v0.t, v2 +# CHECK-ERROR: invalid operand for instruction +# CHECK-ERROR-LABEL: vmsge.vx v8, v4, a0, v0.t, v2
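The compare.s additions above pin down the pseudo-expansion strategy for
vmsge{u}.vx: the unmasked form lowers to vmslt{u}.vx followed by vmnot.m
(itself an alias for vmnand.mm vd, vd, vd), the masked form substitutes
vmxor.mm, and the masked form with vd == v0 compares into a scratch register
and folds the result back with vmandnot.mm so the mask in v0 is not clobbered
mid-expansion. A minimal C++ sketch of emitting the unmasked pair at the MC
layer (illustrative only: the operand layout, the use of NoRegister for an
absent mask, and the enum names VMSLT_VX/VMNAND_MM are assumptions based on
the encodings checked above, not necessarily how expandVMSGE is written):

    // Illustrative expansion of 'vmsge.vx vd, vs2, rs1' (unmasked case):
    //   vmslt.vx  vd, vs2, rs1   ; vd = (vs2 < rs1)
    //   vmnand.mm vd, vd, vd     ; vd = ~vd, printed as 'vmnot.m vd, vd'
    MCInst Cmp = MCInstBuilder(RISCV::VMSLT_VX)
                     .addReg(DstReg)
                     .addReg(Src2Reg)
                     .addReg(Src1Reg)
                     .addReg(RISCV::NoRegister); // no v0.t mask
    MCInst Inv = MCInstBuilder(RISCV::VMNAND_MM)
                     .addReg(DstReg)
                     .addReg(DstReg)
                     .addReg(DstReg);

Either sequence is two 4-byte instructions, which matches the paired
CHECK-ENCODING/CHECK-UNKNOWN lines in the compare.s cases above.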