Index: include/llvm/CodeGen/GlobalISel/InstructionSelectTestgen.h
===================================================================
--- /dev/null
+++ include/llvm/CodeGen/GlobalISel/InstructionSelectTestgen.h
@@ -0,0 +1,30 @@
+//===-- GlobalISel/InstructionSelectTestgen.h -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file This file describes the interface of the ModulePass responsible
+/// for auto-generating regression tests for the InstructionSelect pass of
+/// GlobalISel.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_TESTGEN_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_TESTGEN_H
+
+#include "llvm/Pass.h"
+
+namespace llvm {
+class InstructionSelectTestgen : public ModulePass {
+public:
+  static char ID;
+  InstructionSelectTestgen() : ModulePass(ID) {}
+  StringRef getPassName() const override { return "InstructionSelectTestgen"; }
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnModule(Module &M) override;
+};
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_TESTGEN_H
Index: include/llvm/CodeGen/GlobalISel/InstructionSelector.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -19,6 +19,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelectorTestgen.h"
 #include "llvm/Support/CodeGenCoverage.h"
 #include <bitset>
 #include <cstddef>
@@ -297,9 +298,76 @@
   /// - RuleID - The ID of the rule that was covered.
   GIR_Coverage,
 
+  /// Keeps track of the number of GI opcodes. Must be the last entry.
   GIU_NumOpcodes,
 };
 
+/// Maintains some meta-information about GI opcodes.
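+/// For instance, the row {GIM_CheckType, 3} below records that a GIM_CheckType
+/// opcode is followed by three MatchTable operands (an instruction ID, an
+/// operand index, and a type ID), so a generic walker such as
+/// nextGIOpcodeIdx() can skip over it without decoding it.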
+struct GIOpcodeMeta {
+  GIOpcodeMeta() {
+    static constexpr unsigned GIU_NumOperands[GIU_NumOpcodes][2] = {
+        {GIM_Try, 1},
+        {GIM_RecordInsn, 3},
+        {GIM_CheckFeatures, 1},
+        {GIM_CheckOpcode, 2},
+        {GIM_CheckNumOperands, 2},
+        {GIM_CheckI64ImmPredicate, 2},
+        {GIM_CheckAPIntImmPredicate, 2},
+        {GIM_CheckAPFloatImmPredicate, 2},
+        {GIM_CheckAtomicOrdering, 2},
+        {GIM_CheckAtomicOrderingOrStrongerThan, 2},
+        {GIM_CheckAtomicOrderingWeakerThan, 2},
+        {GIM_CheckMemorySizeEqualTo, 3},
+        {GIM_CheckMemorySizeEqualToLLT, 3},
+        {GIM_CheckMemorySizeLessThanLLT, 3},
+        {GIM_CheckMemorySizeGreaterThanLLT, 3},
+        {GIM_CheckType, 3},
+        {GIM_CheckPointerToAny, 3},
+        {GIM_CheckRegBankForClass, 3},
+        {GIM_CheckComplexPattern, 4},
+        {GIM_CheckConstantInt, 3},
+        {GIM_CheckLiteralInt, 3},
+        {GIM_CheckIntrinsicID, 3},
+        {GIM_CheckIsMBB, 2},
+        {GIM_CheckIsSafeToFold, 1},
+        {GIM_CheckIsSameOperand, 4},
+        {GIM_Reject, 0},
+
+        {GIR_MutateOpcode, 3},
+        {GIR_BuildMI, 2},
+        {GIR_Copy, 3},
+        {GIR_CopyOrAddZeroReg, 4},
+        {GIR_CopySubReg, 4},
+        {GIR_AddImplicitDef, 2},
+        {GIR_AddImplicitUse, 2},
+        {GIR_AddRegister, 2},
+        {GIR_AddTempRegister, 3},
+        {GIR_AddImm, 2},
+        {GIR_ComplexRenderer, 2},
+        {GIR_ComplexSubOperandRenderer, 3},
+        {GIR_CustomRenderer, 3},
+        {GIR_CopyConstantAsSImm, 2},
+        {GIR_CopyFConstantAsFPImm, 2},
+        {GIR_ConstrainOperandRC, 3},
+        {GIR_ConstrainSelectedInstOperands, 1},
+        {GIR_MergeMemOperands, 3},
+        {GIR_EraseFromParent, 1},
+        {GIR_MakeTempReg, 2},
+        {GIR_Done, 0},
+        {GIR_Coverage, 1},
+    };
+#ifndef NDEBUG
+    bool Visited[GIU_NumOpcodes] = {};
+#endif
+    for (const auto &R : GIU_NumOperands) {
+      assert(!Visited[R[0]] && (Visited[R[0]] = true) &&
+             "GIU_NumOperands table contains duplicates or is missing a record");
+      NumOperands[R[0]] = R[1];
+    }
+  }
+  unsigned NumOperands[GIU_NumOpcodes];
+};
+
 enum {
   /// Indicates the end of the variable-length MergeInsnID list in a
   /// GIR_MergeMemOperands opcode.
@@ -400,6 +468,12 @@
   /// MI and IntoMI do not need to be in the same basic blocks, but MI must
   /// preceed IntoMI.
   bool isObviouslySafeToFold(MachineInstr &MI, MachineInstr &IntoMI) const;
+
+public:
+  virtual std::unique_ptr<InstructionSelectorTestgen> getTestgen() const {
+    llvm_unreachable("Subclasses must use tablegen'erated GlobalISel "
+                     "implementation to auto-generate test cases");
+  }
 };
 
 } // end namespace llvm
Index: include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -107,6 +107,7 @@
       }
 
       MachineInstr *NewMI = MRI.getVRegDef(MO.getReg());
+      assert(NewMI && "Expected a vreg definition");
       if ((size_t)NewInsnID < State.MIs.size())
         State.MIs[NewInsnID] = NewMI;
       else {
Index: include/llvm/CodeGen/GlobalISel/InstructionSelectorTestgen.h
===================================================================
--- /dev/null
+++ include/llvm/CodeGen/GlobalISel/InstructionSelectorTestgen.h
@@ -0,0 +1,63 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelectorTestgen.h -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector testgen.
+/// The class is responsible for auto-generating regression tests for the
+/// InstructionSelect pass of GlobalISel on a rule-by-rule basis.
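+/// Each rule that is neither filtered out nor detected as a duplicate yields
+/// one MIR test function named after the rule (test_rule_id<ID> when rule IDs
+/// are available).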
+/// The basic and fully functional implementation is TableGen'erated; the
+/// class is used by the InstructionSelectTestgen pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_TESTGEN_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_TESTGEN_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+/// Force all target (as well as function and module) features to be available.
+extern cl::opt<bool> TestgenSetAllFeatures;
+
+class InstructionSelectorTestgen {
+public:
+  virtual ~InstructionSelectorTestgen() = default;
+
+  virtual void generateTestCases(Module &M, MachineModuleInfo &MMI) const = 0;
+
+  virtual bool checkFeatures(unsigned FeatureBitsetID) const = 0;
+  virtual LLT getTypeObject(unsigned TypeObjectID) const = 0;
+  virtual const TargetInstrInfo &getTII() const = 0;
+  virtual const TargetRegisterInfo &getTRI() const = 0;
+  virtual const RegisterBankInfo &getRBI() const = 0;
+  virtual const int64_t *getMatchTable() const = 0;
+
+  static MachineIRBuilder createEmptyTestCase(StringRef Name, Module &M,
+                                              MachineModuleInfo &MMI);
+  static void emitReturnIntoTestCase(MachineIRBuilder &MIRBuilder);
+  static Type *deduceIRType(LLT LLTy, LLVMContext &Context);
+
+  class InputRegAllocator;
+
+protected:
+  InstructionSelectorTestgen() = default;
+
+  void generateTestCasesImpl(Module &M, MachineModuleInfo &MMI) const;
+
+private:
+  void generateTestCase(uint64_t CurrentIdx, InputRegAllocator &RA,
+                        MachineIRBuilder &MIRBuilder) const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_TESTGEN_H
Index: include/llvm/InitializePasses.h
===================================================================
--- include/llvm/InitializePasses.h
+++ include/llvm/InitializePasses.h
@@ -181,6 +181,7 @@
 void initializeInstrProfilingLegacyPassPass(PassRegistry&);
 void initializeInstructionCombiningPassPass(PassRegistry&);
 void initializeInstructionSelectPass(PassRegistry&);
+void initializeInstructionSelectTestgenPass(PassRegistry&);
 void initializeInterleavedAccessPass(PassRegistry&);
 void initializeInternalizeLegacyPassPass(PassRegistry&);
 void initializeIntervalPartitionPass(PassRegistry&);
Index: lib/CodeGen/GlobalISel/CMakeLists.txt
===================================================================
--- lib/CodeGen/GlobalISel/CMakeLists.txt
+++ lib/CodeGen/GlobalISel/CMakeLists.txt
@@ -6,6 +6,8 @@
   IRTranslator.cpp
   InstructionSelect.cpp
   InstructionSelector.cpp
+  InstructionSelectTestgen.cpp
+  InstructionSelectorTestgen.cpp
   LegalityPredicates.cpp
   LegalizeMutations.cpp
   Legalizer.cpp
Index: lib/CodeGen/GlobalISel/GlobalISel.cpp
===================================================================
--- lib/CodeGen/GlobalISel/GlobalISel.cpp
+++ lib/CodeGen/GlobalISel/GlobalISel.cpp
@@ -22,4 +22,5 @@
   initializeLocalizerPass(Registry);
   initializeRegBankSelectPass(Registry);
   initializeInstructionSelectPass(Registry);
+  initializeInstructionSelectTestgenPass(Registry);
 }
Index: lib/CodeGen/GlobalISel/InstructionSelectTestgen.cpp
===================================================================
--- /dev/null
+++ lib/CodeGen/GlobalISel/InstructionSelectTestgen.cpp
@@ -0,0 +1,79 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelectTestgen.cpp ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the InstructionSelectTestgen class.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/InstructionSelectTestgen.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/IR/Verifier.h"
+
+#define DEBUG_TYPE "instruction-select-testgen"
+
+using namespace llvm;
+
+char InstructionSelectTestgen::ID = 0;
+INITIALIZE_PASS(InstructionSelectTestgen, DEBUG_TYPE,
+                "Generate instruction selection test cases", false, false)
+
+void InstructionSelectTestgen::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.addRequired<MachineModuleInfo>();
+}
+
+static const MachineFunction &createReturnOnlyTestCase(Module &M,
+                                                       MachineModuleInfo &MMI) {
+  MachineIRBuilder MIRBuilder =
+      InstructionSelectorTestgen::createEmptyTestCase("test_return", M, MMI);
+  InstructionSelectorTestgen::emitReturnIntoTestCase(MIRBuilder);
+  MachineFunction &MF = MIRBuilder.getMF();
+  MF.getProperties().set(MachineFunctionProperties::Property::Legalized);
+  MF.getProperties().set(MachineFunctionProperties::Property::RegBankSelected);
+  return MF;
+}
+
+bool InstructionSelectTestgen::runOnModule(Module &M) {
+  assert(!verifyModule(M, &dbgs()) && "Input module is not valid");
+
+  MachineModuleInfo &MMI = getAnalysis<MachineModuleInfo>();
+
+  for (const Function &F : M.functions())
+    if (const MachineFunction *MF = MMI.getMachineFunction(F))
+      // Generally it is possible to run the Testgen over a non-empty module
+      // (in practice, probably a lit test) and have it add additional test
+      // cases to it. The easiest way to tell apart verification failures in
+      // the final output caused by the initially present machine functions
+      // from failures in the ones we add is to verify the pre-existing
+      // functions first.
+      MF->verify(this, "Pre-existing function", /*AbortOnErrors=*/true);
+
+  // We need to start off with at least one machine function, as there is no
+  // such thing as a machine module: in particular, that is how we gain access
+  // to the Target and the underlying InstructionSelector and
+  // InstructionSelectorTestgen objects.
+  const MachineFunction &MF = createReturnOnlyTestCase(M, MMI);
+
+  // An early Testgen sanity check, in particular, see if the return sequence
+  // common to all the following test cases makes sense:
+  MF.verify(this, "Return only test function", /*AbortOnErrors=*/true);
+
+  const InstructionSelector *ISel = MF.getSubtarget().getInstructionSelector();
+  assert(ISel && "Cannot work without InstructionSelector");
+
+  const std::unique_ptr<InstructionSelectorTestgen> Testgen =
+      ISel->getTestgen();
+  assert(Testgen && "Cannot work without InstructionSelectorTestgen");
+  Testgen->generateTestCases(M, MMI);
+
+  for (const Function &F : M.functions())
+    if (const MachineFunction *MF = MMI.getMachineFunction(F))
+      MF->verify(this, "Output machine function", /*AbortOnErrors=*/true);
+
+  assert(!verifyModule(M, &dbgs()) && "Output module is not valid");
+  return true;
+}
Index: lib/CodeGen/GlobalISel/InstructionSelectorTestgen.cpp
===================================================================
--- /dev/null
+++ lib/CodeGen/GlobalISel/InstructionSelectorTestgen.cpp
@@ -0,0 +1,1191 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelectorTestgen.cpp -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file implements the InstructionSelectorTestgen class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/IR/Verifier.h"
+
+#define DEBUG_TYPE "instructionselector-testgen"
+
+using namespace llvm;
+
+cl::opt<int64_t> TestgenFromRule(
+    "testgen-from-rule",
+    cl::desc("Generate test cases for instruction selector rules from the one "
+             "specified (by Rule ID, inclusive)"),
+    cl::init(std::numeric_limits<int64_t>::min()), cl::Hidden);
+
+cl::opt<int64_t> TestgenUntilRule(
+    "testgen-until-rule",
+    cl::desc("Generate test cases for instruction selector rules until the one "
+             "specified (by Rule ID, inclusive)"),
+    cl::init(std::numeric_limits<int64_t>::max()), cl::Hidden);
+
+cl::list<unsigned> TestgenExcludeRules(
+    "testgen-exclude-rules",
+    cl::desc("Don't try to generate test cases for the instruction selector "
+             "rules listed (by Rule ID)"),
+    cl::CommaSeparated, cl::Hidden);
+
+cl::list<unsigned> TestgenIncludeOnly(
+    "testgen-include-only",
+    cl::desc("Don't try to generate test cases for any instruction selector "
+             "rules but the ones listed (by Rule ID; every other testgen "
+             "filtering option is ignored)"),
+    cl::CommaSeparated, cl::Hidden);
+
+cl::opt<bool> TestgenFilterByRuleNum(
+    "testgen-filter-by-rule-number",
+    cl::desc("Interpret all other testgen filtering option values as Rule Nums "
+             "(MatchTable positions) instead of Rule IDs"),
+    cl::Hidden);
+
+cl::opt<bool> TestgenQuery(
+    "testgen-query",
+    cl::desc("Just output the list of rules & duplicates analysis"),
+    cl::Hidden);
+
+cl::opt<bool>
+    TestgenNoABI("testgen-no-abi",
+                 cl::desc("Don't try to imitate proper ABI boundaries"),
+                 cl::Hidden);
+
+cl::opt<bool> llvm::TestgenSetAllFeatures(
+    "testgen-set-all-features",
+    cl::desc("Force all target features to be available"), cl::Hidden);
+
+MachineIRBuilder
+InstructionSelectorTestgen::createEmptyTestCase(StringRef Name, Module &M,
+                                                MachineModuleInfo &MMI) {
+  LLVMContext &Context = M.getContext();
+
+  Function &F = *cast<Function>(M.getOrInsertFunction(
+      Name, FunctionType::get(Type::getVoidTy(Context), /*isVarArg=*/false)));
+  auto *BB = F.empty() ? BasicBlock::Create(Context, "entry", &F) : &F.front();
+  if (BB->empty())
+    new UnreachableInst(Context, BB);
+  assert(!verifyFunction(F, &dbgs()) && "Empty test IR is broken");
+
+  MachineFunction &MF = MMI.getOrCreateMachineFunction(F);
+  // In case the test case with that particular name already existed and we are
+  // in the update mode:
+  MF.reset();
+
+  MachineBasicBlock *MBB = MF.CreateMachineBasicBlock(BB);
+  MF.push_back(MBB);
+  MF.verify(/*Pass=*/nullptr, "Empty test function", /*AbortOnErrors=*/true);
+
+  MachineIRBuilder MIRBuilder(MF);
+  MIRBuilder.setMBB(*MBB);
+  return MIRBuilder;
+}
+
+void InstructionSelectorTestgen::emitReturnIntoTestCase(
+    MachineIRBuilder &MIRBuilder) {
+  MachineBasicBlock &MBB = MIRBuilder.getMBB();
+  unsigned RetVReg = 0;
+  if (!MBB.empty()) {
+    const MachineInstr &RootInstr = MBB.back();
+    if (RootInstr.getNumOperands()) {
+      const MachineOperand &MO = RootInstr.getOperand(0);
+      if (MO.isReg() && MO.isDef())
+        RetVReg = MO.getReg();
+    }
+  }
+  auto InsnB = MIRBuilder.buildInstr(TargetOpcode::PATCHABLE_RET).addDef(0);
+  if (RetVReg)
+    InsnB.addUse(RetVReg);
+}
+
+Type *InstructionSelectorTestgen::deduceIRType(LLT LLTy, LLVMContext &Context) {
+  assert(LLTy.isValid() && "Didn't expect an invalid LLT as input");
+  if (LLTy.isVector())
+    return VectorType::get(deduceIRType(LLTy.getElementType(), Context),
+                           LLTy.getNumElements());
+  if (LLTy.isPointer())
+    return PointerType::get(Type::getInt32Ty(Context), LLTy.getAddressSpace());
+  return Type::getIntNTy(Context, LLTy.getSizeInBits());
+}
+
+// A default type for an operand (a generic virtual register most notably) if
+// we need to create one without yet knowing what type (size) it should have.
+static const LLT DummyLLT = LLT::scalar(32);
+
+// Guess the LLT that describes the machine memory operand (which imposes the
+// number of bytes of memory read / written and alignment requirements). As the
+// instruction could implicitly extend or truncate the value read from /
+// written to memory, the size of its machine memory operand (if any) is not
+// helpful, and the resulting LLT mostly describes the kind of value (scalar /
+// pointer) and its topology (number of vector lanes).
+static LLT deduceMemOperandLLT(const MachineInstr *MI,
+                               const MachineRegisterInfo &MRI) {
+  for (const MachineOperand &MO : MI->operands())
+    if (MO.isReg() && MO.getReg() && MRI.getType(MO.getReg()).isValid())
+      return MRI.getType(MO.getReg());
+  return DummyLLT;
+}
+
+// Keep the kind and topology of an LLT as much as possible but enforce a
+// specific full size in bits. Should never fail.
+static LLT setNewSizeToLLT(LLT OldLLT, unsigned NewSizeInBits) {
+  if (!OldLLT.isValid())
+    return LLT::scalar(NewSizeInBits);
+
+  // Keep a pointer a pointer with the same address space:
+  if (OldLLT.getSizeInBits() == NewSizeInBits)
+    return OldLLT;
+
+  if (OldLLT.isVector()) {
+    const unsigned NumElts = OldLLT.getNumElements();
+    if (NewSizeInBits % NumElts == 0)
+      return LLT::vector(NumElts, setNewSizeToLLT(OldLLT.getElementType(),
+                                                  NewSizeInBits / NumElts));
+  }
+  // Do not change a vector topology or a pointer size, just return a scalar
+  // with the size requested instead:
+  return LLT::scalar(NewSizeInBits);
+}
+
+// RegisterBankInfo's getRegBankFromRegClass is only required to be defined on
+// user-defined register classes. It could be unable to map TableGen'erated
+// register classes (intersections, for instance) to their reg banks.
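+// getUniqRegBankFromRegClass below therefore scans every register bank and
+// returns the single bank covering RC, or nullptr when no bank or more than
+// one bank covers it.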
+static const RegisterBank *
+getUniqRegBankFromRegClass(const RegisterBankInfo &RBI,
+                           const TargetRegisterClass *RC,
+                           const TargetRegisterInfo *TRI = nullptr) {
+  DEBUG(if (TRI) dbgs() << "Register class " << TRI->getRegClassName(RC)
+                        << " is covered by the following register bank:");
+  const RegisterBank *Result = nullptr;
+  for (unsigned I = 0, E = RBI.getNumRegBanks(); I != E; ++I) {
+    const RegisterBank &RB = RBI.getRegBank(I);
+    if (RB.covers(*RC)) {
+      DEBUG(if (TRI) dbgs() << " " << RB.getName());
+      if (Result) {
+        // Some targets, like AMDGPU, have register super-classes that cross
+        // multiple banks. Apparently in the current implementation a register
+        // bank "covers" a class as soon as it touches it; it doesn't mean it
+        // actually includes all the registers of the register class.
+        DEBUG(if (TRI) dbgs() << " (Multiple banks detected!)\n");
+        return nullptr;
+      }
+      Result = &RB;
+    }
+  }
+  DEBUG(if (TRI) dbgs() << (Result ? "\n" : " (No bank found!)\n"));
+  return Result;
+}
+
+static const RegisterBank *
+getSomeRegBankFromRegClass(const RegisterBankInfo &RBI,
+                           const TargetRegisterClass *RC) {
+  for (unsigned I = 0, E = RBI.getNumRegBanks(); I != E; ++I) {
+    const RegisterBank &RB = RBI.getRegBank(I);
+    if (RB.covers(*RC))
+      return &RB;
+  }
+  return nullptr;
+}
+
+/// A helper providing sensible phys regs to define patterns' input vregs.
+///
+/// It appears that the most portable and robust way to define all the input
+/// vregs (used, but not defined) of an instruction sequence being generated is
+/// to define them as COPY's from appropriate and preferably distinct physical
+/// registers, live-in to the basic block containing the instruction sequence
+/// and preferably the entire function as well. The best pick is a register of
+/// the same size in bits as the vreg, from the same register bank, that is
+/// allocatable and comes from the beginning of the allocation order. The class
+/// also handles the following issues:
+///
+/// 1) RegisterBankInfo::getRegBankFromRegClass not being defined for
+///    tablegen'erated reg classes.
+/// 2) Register classes containing the same physical register and yet having
+///    different sizes, and therefore physical registers having only a weakly
+///    defined size, taken as the maximum of the sizes of all register classes
+///    they belong to.
+/// 3) Register banks not having a full list of register sizes available
+///    directly.
+///
+/// The size-to-register-bank mapping built to address the latter is also used
+/// for picking sensible register banks for internal vregs (those both defined
+/// and used by the instruction sequence being generated).
+class InstructionSelectorTestgen::InputRegAllocator {
+public:
+  InputRegAllocator(const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI,
+                    const MachineFunction &MF) {
+    // Classes processed first have the largest impact on the resulting order
+    // as we only append non-visited registers.
Therefore, it's better to process + // the largest classes first: + SmallVector RegClasses(TRI.regclasses()); + std::stable_sort( + RegClasses.begin(), RegClasses.end(), + [](const TargetRegisterClass *RC1, const TargetRegisterClass *RC2) { + // For the reg classes of the same size, it's better to start with + // more concrete ones so we don't overuse weird stuff like register + // tuples (see AArch64) and whatnot, so stable sort this way here, + // reverse later: + return RC1->getNumRegs() < RC2->getNumRegs(); + }); + std::reverse(RegClasses.begin(), RegClasses.end()); + + // Every physical register belongs to a unique register bank, but it may + // belong to register classes of different sizes. Therefore, "visited" + // property needs to be per size: + SmallDenseMap Size2VisitedPhysRegs; + for (const TargetRegisterClass *RC : RegClasses) + // If the register class crosses multiple register banks don't use it to + // determine the registers contained in register banks: + if (const RegisterBank *RB = getUniqRegBankFromRegClass(RBI, RC, &TRI)) { + const unsigned Size = TRI.getRegSizeInBits(*RC); + // RegisterClassInfo::getOrder is not available as the reserved + // registers set is not freezed yet: + for (MCPhysReg PhysReg : RC->getRawAllocationOrder(MF)) + if (!setVisited(Size2VisitedPhysRegs[Size], PhysReg)) + SizeRegBank2Regs[std::make_pair(Size, RB)].first.push_back(PhysReg); + } + // Initializing TrueSizeRegBank2Regs and Reg2TrueSize + SmallVector Keys; + for (const auto &Item : SizeRegBank2Regs) + Keys.push_back(Item.first); + std::sort(Keys.begin(), Keys.end(), std::greater()); + BitVector VisitedPhysRegs; + for (const auto Key : Keys) + for (const MCPhysReg PhysReg : SizeRegBank2Regs[Key].first) + if (!setVisited(VisitedPhysRegs, PhysReg)) { + Reg2TrueSize[PhysReg] = Key.first; + TrueSizeRegBank2Regs[Key].first.push_back(PhysReg); + } + } + + /// Returns an inferred actual size of the physical register: true size. + /// As a physical register can belong to multiple register classes + /// with different register sizes (in bits), the actual size of the + /// register could only be guessed as the maximum of all sizes: + unsigned getTrueRegSize(MCPhysReg PhysReg) const { + return Reg2TrueSize.find(PhysReg)->second; + } + + /// See if there is a phys reg available for the given size and reg bank. + /// \p ByTrueSize has the same meaning as in next(...) member function. + bool hasNext(unsigned SizeInBits, const RegisterBank &RB, + bool ByTrueSize = false) const { + const auto &Map = ByTrueSize ? TrueSizeRegBank2Regs : SizeRegBank2Regs; + return Map.count(std::make_pair(SizeInBits, &RB)); + } + + /// Get the next available phys reg for the given size and reg bank. + /// \pre hasNext(...) returns true + /// \p ByTrueSize indicates if the physical register requested must have the + /// given size as its true size (maximum of the sizes of all register classes + /// it belongs to), or it's enough for it to have at least one register class + /// containing it with the size specified. + MCPhysReg next(unsigned SizeInBits, const RegisterBank &RB, + bool ByTrueSize = false) { + assert(hasNext(SizeInBits, RB, ByTrueSize) && "Didn't find an allocatable " + "physical register for the " + "register bank and size"); + auto &Map = ByTrueSize ? 
+    auto &Pool = Map[std::make_pair(SizeInBits, &RB)];
+    MCPhysReg AllocatedPhysReg = Pool.first[Pool.second];
+    Pool.second = (Pool.second + 1) % Pool.first.size();
+    return AllocatedPhysReg;
+  }
+
+  /// Continue all further phys reg allocations from the beginning of the
+  /// allocation order.
+  InputRegAllocator &reset() {
+    for (auto &Item : SizeRegBank2Regs)
+      Item.second.second = 0;
+    for (auto &Item : TrueSizeRegBank2Regs)
+      Item.second.second = 0;
+    return *this;
+  }
+
+  using Size2RegBanksTy =
+      std::map<unsigned, SmallPtrSet<const RegisterBank *, 4>>;
+
+  /// Get a map from a register (class) size in bits to the full set of
+  /// register banks containing at least one (non-empty) register class of
+  /// that size.
+  const Size2RegBanksTy &getSize2RegBanks() const {
+    if (Size2RegBanks.empty())
+      for (const auto &Item : SizeRegBank2Regs)
+        Size2RegBanks[Item.first.first].insert(Item.first.second);
+    return Size2RegBanks;
+  }
+
+  void dump(const TargetRegisterInfo &TRI) const {
+    for (const auto &Item : SizeRegBank2Regs) {
+      dbgs() << "Phys regs of size " << Item.first.first
+             << " bits from reg bank " << Item.first.second->getName() << ":";
+      for (const MCPhysReg PhysReg : Item.second.first)
+        dbgs() << " " << TRI.getRegAsmName(PhysReg);
+      dbgs() << "\n";
+    }
+  }
+
+private:
+  using SizeRegBank2RegsTy =
+      DenseMap<std::pair<unsigned, const RegisterBank *>,
+               std::pair<SmallVector<MCPhysReg, 8>, unsigned>>;
+
+  // All the physical registers available per bank and size in the preferred
+  // allocation order with an "allocated so far" index attached:
+  SizeRegBank2RegsTy SizeRegBank2Regs;
+
+  // getTrueRegSize's cache
+  DenseMap<unsigned, unsigned> Reg2TrueSize;
+
+  // All the physical registers available per bank and true size in the
+  // preferred allocation order with an "allocated so far" index attached:
+  SizeRegBank2RegsTy TrueSizeRegBank2Regs;
+
+  // getSize2RegBanks' cache
+  mutable Size2RegBanksTy Size2RegBanks;
+
+  static bool setVisited(BitVector &VisitedPhysRegs, MCPhysReg PhysReg) {
+    if (PhysReg >= VisitedPhysRegs.size())
+      VisitedPhysRegs.resize(PhysReg + 1);
+    if (!VisitedPhysRegs[PhysReg]) {
+      VisitedPhysRegs[PhysReg] = true;
+      return false;
+    }
+    return true;
+  }
+};
+
+using InputRegAllocator = InstructionSelectorTestgen::InputRegAllocator;
+
+/// Speculatively assign a sensible register bank to the \p Reg provided.
+///
+/// Do nothing if the \p Reg already has a bank assigned, otherwise pick a bank
+/// based on the vreg's size and the register bank frequency analysis provided
+/// via \p Hist. Update \p Hist to reflect the changes.
+static void
+assignRegBank(unsigned Reg,
+              const InputRegAllocator::Size2RegBanksTy &Size2RegBanks,
+              SmallDenseMap<const RegisterBank *, unsigned> &Hist,
+              MachineFunction &MF) {
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  if (MRI.getRegBankOrNull(Reg))
+    return;
+  const LLT LLTy = MRI.getType(Reg);
+  unsigned Size = LLTy.getSizeInBits();
+  // Some of the vregs are so small that it's not possible to allocate equally
+  // sized phys regs for them. In that case we'd better find the closest
+  // allocatable size still sufficiently large to initialize all bits of the
+  // vreg. We can't do the same in a robust and portable way if the vreg is
+  // larger than any allocatable phys reg though.
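+  // E.g. (illustrative): for an s1 vreg on a target whose smallest
+  // allocatable registers are 32 bits wide, lower_bound(1) lands on the
+  // 32-bit entry of the (size-ascending) Size2RegBanks map.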
+  const auto I = Size2RegBanks.lower_bound(Size);
+  if (I == Size2RegBanks.end()) {
+    DEBUG(dbgs()
+          << "- Didn't find a register bank containing allocatable registers\n"
+             "  of a compatible size; vreg: %"
+          << TargetRegisterInfo::virtReg2Index(Reg) << "(" << LLTy
+          << "), size: " << Size << " bits (or larger).\n");
+    // This test will definitely fail during the actual selection due to
+    // register banks not being fully defined yet, but we don't want to
+    // create too much noise on targets early in GlobalISel migration:
+    MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
+    return;
+  }
+  const auto &Banks = I->second;
+  assert(!Banks.empty() && "Size2RegBanks is broken: contains empty sets");
+  // Out of the banks that have physical registers of an appropriate size
+  // pick the one that is most common already:
+  const RegisterBank *RB = *std::max_element(
+      Banks.begin(), Banks.end(),
+      [&Hist](const RegisterBank *RB1, const RegisterBank *RB2) {
+        // Add the unique ID as a tiebreaker for determinism:
+        return std::make_pair(Hist[RB1], RB1->getID()) <
+               std::make_pair(Hist[RB2], RB2->getID());
+      });
+  assert(RB && "Size2RegBanks is broken: contains null pointers");
+  MRI.setRegBank(Reg, *RB);
+  // As we might have not picked the most common bank at the previous step
+  // out of all banks, but only out of a subset of them, update the stats so
+  // we could greedily select the locally best result for the next vreg:
+  ++Hist[RB];
+}
+
+/// Speculatively assign sensible register banks to all vregs in a function
+/// missing a register bank.
+static void assignRegBanksToUnconstrainedVRegs(
+    const InputRegAllocator::Size2RegBanksTy &Size2RegBanks,
+    MachineFunction &MF) {
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  SmallDenseMap<const RegisterBank *, unsigned> Hist;
+  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
+    const unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+    // Overall, the in & out vregs of the entire instruction sequence affect
+    // the selection, while the internal vregs and their reg banks don't
+    // usually matter; but if the selector went to the trouble of checking
+    // them, they probably do, so better take them into account as well as
+    // the external ones:
+    if (!MRI.def_empty(Reg) || !MRI.use_empty(Reg))
+      // See in which bank the instruction sequence mostly lies:
+      ++Hist[MRI.getRegBankOrNull(Reg)];
+  }
+  // Process the in & out "external" vregs first so our choices on internal
+  // ones don't affect our choices on the external ones:
+  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
+    const unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+    if (MRI.def_empty(Reg) ^ MRI.use_empty(Reg))
+      assignRegBank(Reg, Size2RegBanks, Hist, MF);
+  }
+  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
+    const unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+    // The internal vregs don't really matter, but we still need to pick
+    // something for them.
+    // Technically, out-of-place banks, banks with no registers big enough, or
+    // no registers at all are fine here, but we don't really want to stress
+    // the selector with malicious input just in case:
+    if (!MRI.def_empty(Reg) && !MRI.use_empty(Reg))
+      assignRegBank(Reg, Size2RegBanks, Hist, MF);
+  }
+}
+
+static void defineUndefinedVRegs(const TargetInstrInfo &TII,
+                                 const TargetRegisterInfo &TRI,
+                                 const RegisterBankInfo &RBI,
+                                 InputRegAllocator &RA,
+                                 MachineIRBuilder &MIRBuilder) {
+  MachineFunction &MF = MIRBuilder.getMF();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const auto OrigInsertPt = MIRBuilder.getInsertPt();
+  MIRBuilder.setInsertPt(MIRBuilder.getMBB(), MIRBuilder.getMBB().begin());
+  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
+    const unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+    if (MRI.def_empty(Reg) && !MRI.use_empty(Reg)) {
+      LLT LLTy = MRI.getType(Reg);
+      assert(LLTy.isValid() && "Undefined virtual reg doesn't have a type");
+      const RegisterBank *RB = MRI.getRegBankOrNull(Reg);
+      if (!RB) {
+        DEBUG(dbgs() << "- Undefined virtual register %"
+                     << TargetRegisterInfo::virtReg2Index(Reg)
+                     << " doesn't have a reg bank\n");
+        MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
+      } else if (!RA.hasNext(LLTy.getSizeInBits(), *RB, true)) {
+        DEBUG(
+            dbgs() << "- Didn't find an allocatable physical register in the\n"
+                   << "  register bank " << RB->getName()
+                   << " of the required size;\n  vreg: %"
+                   << TargetRegisterInfo::virtReg2Index(Reg) << "(" << LLTy
+                   << "), size: " << LLTy.getSizeInBits() << " bits.\n");
+        MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
+      } else if (TestgenNoABI)
+        MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
+      else
+        MRI.addLiveIn(RA.next(LLTy.getSizeInBits(), *RB, true), Reg);
+    }
+  }
+  MIRBuilder.setInsertPt(MIRBuilder.getMBB(), OrigInsertPt);
+  MRI.EmitLiveInCopies(&MF.front(), TRI, TII);
+}
+
+static uint64_t nextGIOpcodeIdx(const int64_t *MatchTable,
+                                uint64_t CurrentIdx) {
+  static const GIOpcodeMeta GIU;
+  if (MatchTable[CurrentIdx] == GIR_MergeMemOperands)
+    do {
+      ++CurrentIdx;
+    } while (MatchTable[CurrentIdx] != GIU_MergeMemOperands_EndOfList);
+  else
+    CurrentIdx += GIU.NumOperands[MatchTable[CurrentIdx]];
+  return CurrentIdx + 1;
+}
+
+static MachineInstrBuilder &ensureNumOperands(MachineInstrBuilder &InsnB,
+                                              int64_t NumOperands,
+                                              int64_t NumDefs = -1) {
+  assert(InsnB->getParent() && "Instruction must be inserted into a machine "
+                               "function, otherwise updating an operand in "
+                               "place won't update def-use chains properly");
+  assert(InsnB->getMF() && "Unexpected BB not inserted into a MF");
+  MachineRegisterInfo &MRI = InsnB->getMF()->getRegInfo();
+
+  if (NumDefs == -1)
+    NumDefs = InsnB->getDesc().NumDefs;
+
+  while (InsnB->getNumOperands() < NumOperands)
+    if (InsnB->getNumOperands() < NumDefs)
+      InsnB.addDef(MRI.createGenericVirtualRegister(DummyLLT));
+    else
+      InsnB.addReg(MRI.createGenericVirtualRegister(DummyLLT));
+
+  for (int I = 0; I < NumDefs; ++I)
+    InsnB->getOperand(I).setIsDef();
+
+  return InsnB;
+}
+
+static void setType(MachineInstrBuilder &InsnB, unsigned OpIdx,
+                    LLT ExpectedType, MachineRegisterInfo &MRI) {
+  SmallVector<unsigned, 4> OpIdxs{OpIdx};
+  const MCInstrDesc &MCID = InsnB->getDesc();
+  if (OpIdx < MCID.getNumOperands() && MCID.OpInfo[OpIdx].isGenericType()) {
+    const unsigned TypeIdx = MCID.OpInfo[OpIdx].getGenericTypeIndex();
+    for (unsigned I = 0,
+                  E = std::min(MCID.getNumOperands(), InsnB->getNumOperands());
+         I < E; ++I) {
+      if (I == OpIdx || !MCID.OpInfo[I].isGenericType())
+        continue;
+      if (MCID.OpInfo[I].getGenericTypeIndex() == TypeIdx)
+        OpIdxs.push_back(I);
+    }
+  }
+  ensureNumOperands(InsnB, *std::max_element(OpIdxs.begin(), OpIdxs.end()) + 1);
+  for (auto Idx : OpIdxs) {
+    MachineOperand &MO = InsnB->getOperand(Idx);
+    if (!MO.getReg())
+      MO.setReg(MRI.createGenericVirtualRegister(ExpectedType));
+    else
+      MRI.setType(MO.getReg(), ExpectedType);
+  }
+}
+
+static MachineInstrBuilder &
+setOperand(MachineInstrBuilder &InsnB, int64_t OpIdx,
+           std::function<void(MachineInstrBuilder &)> OperandAdder) {
+  ensureNumOperands(InsnB, OpIdx);
+
+  if (InsnB->getNumOperands() == OpIdx)
+    OperandAdder(InsnB);
+  else {
+    SmallVector<MachineOperand, 4> Uses;
+    for (int64_t I = InsnB->getNumOperands() - 1; I > OpIdx; --I) {
+      Uses.push_back(InsnB->getOperand(I));
+      InsnB->RemoveOperand(I);
+    }
+    InsnB->RemoveOperand(OpIdx);
+    OperandAdder(InsnB);
+    while (!Uses.empty())
+      InsnB.add(Uses.pop_back_val());
+  }
+  return InsnB;
+}
+
+// Raw representation of a single match table rule as an ordered union of
+// several contiguous regions of the match table. The representation tries its
+// best to ignore parts of the table that don't affect semantics of a single
+// rule in isolation, like labels and rule IDs.
+//
+// Assuming that all the parts of the MatchTable that don't affect the
+// selection process but only identify a rule, like GIR_Coverage opcodes, come
+// within a rule as a single contiguous block, the meaningful parts of the
+// rule could be represented as some prefix (starting from the first
+// non-control flow opcode, in other words, skipping GIM_Try and its
+// label-operand) and suffix:
+using RuleBodyTy = std::pair<ArrayRef<int64_t>, ArrayRef<int64_t>>;
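+// E.g. (illustrative), for a linear table fragment like
+//   GIM_Try, /*OnFail*/<Label>,
+//     GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_ADD, <more checks...>,
+//     GIR_Coverage, <RuleID>,
+//     <renderers...>,
+//   GIR_Done,
+// the body is the pair of opcode ranges on either side of the GIR_Coverage
+// block.
+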
+// Get a rule descriptor, containing the index of its GIR_Done opcode, RuleID,
+// and a raw representation of the entire body.
+//
+// \pre MatchTable is a non-optimized linear match table.
+// \pre CurrentIdx points to the first (and only) GIM_Try opcode of the rule,
+// which has all of its semantically meaningless opcodes that are to be
+// excluded from the body as a single contiguous subregion somewhere.
+// \post the prefix of the body is a range from the first opcode after the
+// initial GIM_Try until GIR_Coverage (or, more generally, the first
+// semantically meaningless opcode), the suffix is a range from the first
+// opcode after the last semantically meaningless opcode until GIR_Done
+// (exclusive).
+static std::tuple<uint64_t, int64_t, RuleBodyTy>
+getDoneIdxRuleIDAndBody(const int64_t *MatchTable, uint64_t CurrentIdx) {
+  // Skipping GIM_Try:
+  const uint64_t BodyIdx = nextGIOpcodeIdx(MatchTable, CurrentIdx);
+  uint64_t ExcludedFirst = BodyIdx;
+  uint64_t ExcludedLast = ExcludedFirst;
+  // The RuleID we discovered so far, or the first one if there are many:
+  int64_t FirstRuleID = -1;
+  // Have we already iterated over the contiguous region of unimportant
+  // opcodes we are going to exclude from the body?
+  bool ExcludedRegionPassed = false;
+  unsigned NestingLevel = 0;
+  do {
+    switch (MatchTable[CurrentIdx++]) {
+    case GIM_Try: {
+      // Skipping the OnFail label operand:
+      CurrentIdx++;
+      if (++NestingLevel > 1)
+        report_fatal_error(
+            "Testgen doesn't support non-linear or optimized Match Tables");
+      break;
+    }
+    case GIR_Coverage: {
+      int64_t RuleID = MatchTable[CurrentIdx++];
+      assert(RuleID >= 0 && "Expected a non-negative RuleID");
+      if (FirstRuleID < 0) {
+        FirstRuleID = RuleID;
+        ExcludedFirst = CurrentIdx - 2;
+      }
+      ExcludedLast = CurrentIdx;
+      if (ExcludedRegionPassed)
+        ExcludedFirst = ExcludedLast;
+      break;
+    }
+    default:
+      ExcludedRegionPassed = FirstRuleID >= 0;
+      CurrentIdx = nextGIOpcodeIdx(MatchTable, CurrentIdx - 1);
+    }
+  } while (MatchTable[CurrentIdx] != GIR_Done);
+  assert(BodyIdx <= ExcludedFirst &&
+         ((ExcludedFirst < ExcludedLast && FirstRuleID >= 0) ||
+          ExcludedFirst == ExcludedLast) &&
+         ExcludedLast <= CurrentIdx && "Broken Excluded Region");
+  const auto &RuleBody = RuleBodyTy(
+      ArrayRef<int64_t>(&MatchTable[BodyIdx], &MatchTable[ExcludedFirst]),
+      ArrayRef<int64_t>(&MatchTable[ExcludedLast], &MatchTable[CurrentIdx]));
+  return std::make_tuple(CurrentIdx, FirstRuleID, RuleBody);
+}
+
+static void
+initInsnBuilders(const int64_t *MatchTable, uint64_t CurrentIdx,
+                 MachineIRBuilder &MIRBuilder,
+                 SmallVectorImpl<MachineInstrBuilder> &InsnBuilders) {
+  do {
+    switch (MatchTable[CurrentIdx++]) {
+    case GIM_CheckOpcode: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Expected = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckOpcode " << InsnID << ", " << Expected << "\n");
+      if (static_cast<size_t>(InsnID) >= InsnBuilders.size())
+        InsnBuilders.resize(InsnID + 1);
+      InsnBuilders[InsnID] = MIRBuilder.buildInstr(Expected);
+
+      // G_CONSTANT's and G_FCONSTANT's are assumed to be well-formed by the
+      // selector without actually checking it.
+      if (Expected == TargetOpcode::G_CONSTANT) {
+        ensureNumOperands(InsnBuilders[InsnID], 2);
+        InsnBuilders[InsnID]->getOperand(1).ChangeToImmediate(1);
+      } else if (Expected == TargetOpcode::G_FCONSTANT) {
+        ensureNumOperands(InsnBuilders[InsnID], 2);
+        InsnBuilders[InsnID]->getOperand(1).ChangeToFPImmediate(ConstantFP::get(
+            MIRBuilder.getMF().getFunction().getContext(), APFloat(0.0)));
+      }
+      break;
+    }
+    default:
+      CurrentIdx = nextGIOpcodeIdx(MatchTable, CurrentIdx - 1);
+    }
+  } while (MatchTable[CurrentIdx] != GIR_Done);
+}
+
+static void mainPass(const InstructionSelectorTestgen &Testgen,
+                     uint64_t CurrentIdx, MachineIRBuilder &MIRBuilder,
+                     SmallVectorImpl<MachineInstrBuilder> &InsnBuilders) {
+  MachineFunction &MF = MIRBuilder.getMF();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const TargetRegisterInfo &TRI = Testgen.getTRI();
+  const RegisterBankInfo &RBI = Testgen.getRBI();
+  const int64_t *MatchTable = Testgen.getMatchTable();
+  do {
+    switch (MatchTable[CurrentIdx++]) {
+    case GIM_RecordInsn: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_RecordInsn " << NewInsnID << ", " << InsnID << ", "
+                   << OpIdx << "\n");
+      MachineOperand &Def =
+          ensureNumOperands(InsnBuilders[NewInsnID], 1, 1)->getOperand(0);
+      MachineOperand &Use =
+          ensureNumOperands(InsnBuilders[InsnID], OpIdx + 1)->getOperand(OpIdx);
+      // One of the operands - Use or Def - could be a %noreg, while the other
+      // could be a vreg with a meaningful type or type and bank assigned, and
+      // we wouldn't generally know which is which. Therefore, make sure here
+      // that we don't lose those constraints:
+      unsigned Reg = Use.isReg() && Use.getReg() ? Use.getReg() : Def.getReg();
+      Reg = Reg ? Reg : MRI.createGenericVirtualRegister(DummyLLT);
+      Def.setReg(Reg);
+      Use.ChangeToRegister(Reg, /*isDef*/ false);
+      break;
+    }
+    case GIM_CheckNumOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Expected = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckNumOperands " << InsnID << ", " << Expected
+                   << "\n");
+      ensureNumOperands(InsnBuilders[InsnID], Expected);
+      break;
+    }
+    case GIM_CheckIntrinsicID: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckIntrinsicID " << InsnID << ", " << OpIdx << ", "
                   << Value << "\n");
+      auto &InsnB = ensureNumOperands(InsnBuilders[InsnID], OpIdx, OpIdx);
+      setOperand(InsnB, OpIdx, [Value](MachineInstrBuilder &InsnB) {
+        InsnB.addIntrinsicID(static_cast<Intrinsic::ID>(Value));
+      });
+      break;
+    }
+    case GIM_CheckIsMBB: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckIsMBB " << InsnID << ", " << OpIdx << "\n");
+      setOperand(
+          InsnBuilders[InsnID], OpIdx,
+          [&MF](MachineInstrBuilder &InsnB) { InsnB.addMBB(&MF.back()); });
+      break;
+    }
+    case GIM_CheckType: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t TypeID = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckType " << InsnID << ", " << OpIdx << ", "
+                   << TypeID << "\n");
+      setType(InsnBuilders[InsnID], OpIdx, Testgen.getTypeObject(TypeID), MRI);
+      break;
+    }
+    case GIM_CheckPointerToAny: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t SizeInBits = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckPointerToAny " << InsnID << ", " << OpIdx
+                   << ", " << SizeInBits << "\n");
+      if (!SizeInBits)
+        SizeInBits = MF.getDataLayout().getPointerSizeInBits(0);
+      setType(InsnBuilders[InsnID], OpIdx, LLT::pointer(0, SizeInBits), MRI);
+      break;
+    }
+    case GIM_CheckRegBankForClass: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RCEnum = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckRegBankForClass " << InsnID << ", " << OpIdx
+                   << ", " << RCEnum << "\n");
+      const TargetRegisterClass *RC = TRI.getRegClass(RCEnum);
+      // If the register class crosses multiple register banks just pick one:
+      const RegisterBank *ExpectedRegBank = getSomeRegBankFromRegClass(RBI, RC);
+      MachineOperand &MO =
+          ensureNumOperands(InsnBuilders[InsnID], OpIdx + 1)->getOperand(OpIdx);
+      if (!MO.getReg())
+        MO.setReg(MRI.createGenericVirtualRegister(DummyLLT));
+      if (ExpectedRegBank)
+        MRI.setRegBank(MO.getReg(), *ExpectedRegBank);
+      else {
+        DEBUG(dbgs() << "- Didn't find a register bank covering class "
+                     << TRI.getRegClassName(RC) << " for operand #" << OpIdx
+                     << " (" << MO << ") of\n  "
+                     << *InsnBuilders[InsnID].getInstr() << "\n");
+        // This test will definitely fail during the actual selection due to
+        // register banks not being fully defined yet, but we don't want to
+        // create too much noise on targets early in GlobalISel migration:
+        MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
+      }
+      break;
+    }
+    case GIM_CheckFeatures: {
+      int64_t ExpectedBitsetID = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckFeatures " << ExpectedBitsetID << "\n");
+      if (!Testgen.checkFeatures(ExpectedBitsetID)) {
+        DEBUG(dbgs() << "! Testgen deficiency: Don't know how to satisfy\n"
+                     << "  Module & Function Target Features yet!\n");
+        // This test definitely won't match the rule being tested and might not
+        // even select at all, so mark it to be skipped:
+        MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
+      }
+      break;
+    }
+    case GIM_CheckConstantInt: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckConstantInt " << InsnID << ", " << OpIdx << ", "
+                   << Value << "\n");
+      MachineOperand &MO =
+          ensureNumOperands(InsnBuilders[InsnID], OpIdx + 1)->getOperand(OpIdx);
+      if (MO.isReg() && MO.isUse() && MRI.def_empty(MO.getReg())) {
+        if (!MO.getReg())
+          MO.ChangeToRegister(MRI.createGenericVirtualRegister(DummyLLT),
+                              /*isDef*/ false);
+        InsnBuilders.push_back(MIRBuilder.buildInstr(TargetOpcode::G_CONSTANT)
+                                   .addDef(MO.getReg())
+                                   .addImm(Value));
+      }
+      break;
+    }
+    case GIM_CheckI64ImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckI64ImmPredicate " << InsnID << ", " << Predicate
+                   << "\n");
+      (void)Predicate;
+      setOperand(InsnBuilders[InsnID], 1,
+                 [](MachineInstrBuilder &InsnB) { InsnB.addImm(1); });
+      break;
+    }
+    default:
+      CurrentIdx = nextGIOpcodeIdx(MatchTable, CurrentIdx - 1);
+    }
+  } while (MatchTable[CurrentIdx] != GIR_Done);
+}
+
+static void setMemOperand(MachineFunction &MF, const MachineInstrBuilder &InsnB,
+                          uint64_t Size, unsigned Alignment,
+                          AtomicOrdering Ordering) {
+  if (InsnB->memoperands_empty()) {
+    auto PtrInfo = MachinePointerInfo::getUnknownStack(MF);
+    // Set the flags as expected; if the instruction may neither load nor
+    // store, set it as a load conservatively.
+    auto Flags = (InsnB->mayLoad() ? MachineMemOperand::MOLoad
+                                   : MachineMemOperand::MONone) |
+                 (InsnB->mayStore() ? MachineMemOperand::MOStore
+                                    : MachineMemOperand::MOLoad);
+    MachineMemOperand *MMO = MF.getMachineMemOperand(
+        PtrInfo, Flags, Size, Alignment, AAMDNodes(),
+        /*MDNode *Ranges=*/nullptr, SyncScope::System, Ordering);
+    InsnB.addMemOperand(MMO);
+    return;
+  }
+  MachineMemOperand *MMO = *InsnB->memoperands_begin();
+  if (MMO->getSize() == Size && MMO->getAlignment() == Alignment &&
+      MMO->getOrdering() == Ordering)
+    return;
+
+  MachineMemOperand *NewMMO = MF.getMachineMemOperand(
+      MMO->getPointerInfo(), MMO->getFlags(), Size, Alignment, MMO->getAAInfo(),
+      MMO->getRanges(), MMO->getSyncScopeID(), Ordering);
+  *InsnB->memoperands_begin() = NewMMO;
+}
+
+static void setMemOperand(MachineFunction &MF, const MachineInstrBuilder &InsnB,
+                          uint64_t Size, LLT MemOperandLLT,
+                          AtomicOrdering Ordering) {
+  Type *ValTy = InstructionSelectorTestgen::deduceIRType(
+      MemOperandLLT, MF.getFunction().getContext());
+  auto Alignment = MF.getDataLayout().getABITypeAlignment(ValTy);
+  setMemOperand(MF, InsnB, Size, Alignment, Ordering);
+}
+
+static void latePass(const int64_t *MatchTable, uint64_t CurrentIdx,
+                     MachineFunction &MF,
+                     SmallVectorImpl<MachineInstrBuilder> &InsnBuilders) {
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  do {
+    int64_t MatcherOpcode = MatchTable[CurrentIdx++];
+    switch (MatcherOpcode) {
+    case GIM_CheckAtomicOrdering: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OrderingVal = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckAtomicOrdering " << InsnID << ", "
+                   << OrderingVal << "\n");
+      const auto Ordering = static_cast<AtomicOrdering>(OrderingVal);
+      MachineInstrBuilder &InsnB = InsnBuilders[InsnID];
+      if (InsnB->memoperands_empty()) {
+        // There is no hint here which operand represents the value being read
+        // / written, so use the best guess on the value's kind and topology:
+        LLT LLTy = deduceMemOperandLLT(InsnB, MRI);
+        // There is no hint on the actual number of bytes being read / written
+        // either, so just derive it from the value's type:
+        Type *ValTy = InstructionSelectorTestgen::deduceIRType(
+            LLTy, MF.getFunction().getContext());
+        auto Size = MF.getDataLayout().getTypeStoreSize(ValTy);
+        setMemOperand(MF, InsnB, Size, LLTy, Ordering);
+      } else {
+        MachineMemOperand *MMO = *InsnB->memoperands_begin();
+        setMemOperand(MF, InsnB, MMO->getSize(), MMO->getAlignment(), Ordering);
+      }
+      break;
+    }
+    case GIM_CheckMemorySizeEqualTo: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t MMOIdx = MatchTable[CurrentIdx++];
+      const uint64_t Size = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckMemorySizeEqualTo " << InsnID << ", " << MMOIdx
+                   << ", " << Size << "\n");
+      assert(MMOIdx == 0 && "Testgen doesn't support "
+                            "GIM_CheckMemorySizeEqualTo for MMOIdx > 0 yet");
+      (void)MMOIdx;
+      MachineInstrBuilder &InsnB = InsnBuilders[InsnID];
+      // There is no hint here which operand represents the value being read /
+      // written, so use the best guess on the value's kind and topology:
+      LLT LLTy = deduceMemOperandLLT(InsnB, MRI);
+      // However, we know the actual number of bytes being read / written,
+      // so enforce the size on the LLT while keeping its kind and topology to
+      // figure out the alignment:
+      LLT SizedLLT = setNewSizeToLLT(LLTy, /*NewSizeInBits=*/Size * 8);
+      if (InsnB->memoperands_empty())
+        setMemOperand(MF, InsnB, Size, SizedLLT, AtomicOrdering::NotAtomic);
+      else {
+        MachineMemOperand *MMO = *InsnB->memoperands_begin();
+        setMemOperand(MF, InsnB, Size, SizedLLT, MMO->getOrdering());
+      }
+      break;
+    }
+    case GIM_CheckMemorySizeEqualToLLT: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t MMOIdx = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckMemorySizeEqualToLLT " << InsnID << ", "
+                   << MMOIdx << ", " << OpIdx << "\n");
+      assert(MMOIdx == 0 && "Testgen doesn't support "
+                            "GIM_CheckMemorySizeEqualToLLT for MMOIdx > 0 yet");
+      (void)MMOIdx;
+      auto &InsnB = ensureNumOperands(InsnBuilders[InsnID], OpIdx + 1);
+      const MachineOperand &MO = InsnB->getOperand(OpIdx);
+      if (!MO.isReg() || !MO.getReg() || !MRI.getType(MO.getReg()).isValid())
+        break;
+      // We know precisely which operand represents the value being read /
+      // written and must maintain its type's kind, topology, and size:
+      LLT LLTy = MRI.getType(MO.getReg());
+      const uint64_t Size = (LLTy.getSizeInBits() + 7) / 8;
+      if (InsnB->memoperands_empty())
+        setMemOperand(MF, InsnB, Size, LLTy, AtomicOrdering::NotAtomic);
+      else {
+        MachineMemOperand *MMO = *InsnB->memoperands_begin();
+        setMemOperand(MF, InsnB, Size, LLTy, MMO->getOrdering());
+      }
+      break;
+    }
+    case GIM_CheckMemorySizeLessThanLLT:
+    case GIM_CheckMemorySizeGreaterThanLLT: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t MMOIdx = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckMemorySize{Less,Greater}ThanLLT " << InsnID
+                   << ", " << MMOIdx << ", " << OpIdx << "\n");
+      assert(MMOIdx == 0 && "Testgen doesn't support "
+                            "GIM_CheckMemorySize{Less,Greater}ThanLLT for "
+                            "MMOIdx > 0 yet");
+      (void)MMOIdx;
+      auto &InsnB = ensureNumOperands(InsnBuilders[InsnID], OpIdx + 1);
+      const MachineOperand &MO = InsnB->getOperand(OpIdx);
+      if (!MO.isReg() || !MO.getReg() || !MRI.getType(MO.getReg()).isValid())
+        break;
+      // We know precisely which operand represents the value being read /
+      // written and must maintain its type's kind and topology:
+      LLT LLTy = MRI.getType(MO.getReg());
+      // ... but we need to adjust its size so it would satisfy the condition:
+      const uint64_t Size = MatcherOpcode == GIM_CheckMemorySizeLessThanLLT
+                                ? (LLTy.getSizeInBits() + 7) / 16
+                                : (LLTy.getSizeInBits() + 3) / 4;
+      if (Size == 0)
+        break;
+      LLT SizedLLT = setNewSizeToLLT(LLTy, /*NewSizeInBits=*/Size * 8);
+      if (InsnB->memoperands_empty())
+        setMemOperand(MF, InsnB, Size, SizedLLT, AtomicOrdering::NotAtomic);
+      else {
+        MachineMemOperand *MMO = *InsnB->memoperands_begin();
+        if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT
+                ? MMO->getSize() * 8 < LLTy.getSizeInBits()
+                : MMO->getSize() * 8 > LLTy.getSizeInBits())
+          break;
+        setMemOperand(MF, InsnB, Size, SizedLLT, MMO->getOrdering());
+      }
+      break;
+    }
+    case GIM_CheckIsSameOperand: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t OtherInsnID = MatchTable[CurrentIdx++];
+      int64_t OtherOpIdx = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckIsSameOperand " << InsnID << ", " << OpIdx
+                   << ", " << OtherInsnID << ", " << OtherOpIdx << "\n");
+      auto &InsnB = ensureNumOperands(InsnBuilders[InsnID], OpIdx + 1);
+      auto &OtherInsnB =
+          ensureNumOperands(InsnBuilders[OtherInsnID], OtherOpIdx + 1);
+      const auto &MO = InsnB->getOperand(OpIdx);
+      const auto &OtherMO = OtherInsnB->getOperand(OtherOpIdx);
+      assert(MO.isReg() && OtherMO.isReg() && "Expected regs as same operands");
+      if (MRI.def_empty(OtherMO.getReg()) && MO.getReg())
+        setOperand(OtherInsnB, OtherOpIdx,
+                   [&MO](MachineInstrBuilder &OtherInsnB) {
+                     OtherInsnB.addReg(MO.getReg());
+                   });
+      else
+        setOperand(InsnB, OpIdx, [&OtherMO](MachineInstrBuilder &InsnB) {
+          InsnB.addReg(OtherMO.getReg());
+        });
+      break;
+    }
+    case GIM_CheckAPIntImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG(dbgs() << "GIM_CheckAPIntImmPredicate " << InsnID << ", "
+                   << Predicate << "\n");
+      (void)Predicate;
+      setOperand(
+          InsnBuilders[InsnID], 1, [&MF, &MRI](MachineInstrBuilder &InsnB) {
+            const MachineOperand &Def = InsnB->getOperand(0);
+            LLT LLTy = Def.isReg() ? MRI.getType(Def.getReg()) : LLT{};
+            LLTy = LLTy.isValid() ? LLTy : DummyLLT;
+            InsnB.addCImm(ConstantInt::get(MF.getFunction().getContext(),
+                                           APInt(LLTy.getSizeInBits(), 1)));
+          });
+      break;
+    }
+    default:
+      CurrentIdx = nextGIOpcodeIdx(MatchTable, CurrentIdx - 1);
+    }
+  } while (MatchTable[CurrentIdx] != GIR_Done);
+}
+
+static std::string buildName(unsigned RuleNum, uint64_t RuleIdx,
+                             int64_t RuleID = -1) {
+  const char *Prefix = "test_rule";
+  const auto &Num = "_num" + Twine{RuleNum};
+  const auto &ID = RuleID >= 0 ? "_id" + Twine{RuleID} : Twine{};
+  const auto &Idx = "_at_idx" + Twine{RuleIdx};
+  return (RuleID >= 0 ? Prefix + ID : Prefix + Num + Idx).str();
+}
+
+static std::string buildDesc(unsigned RuleNum, uint64_t RuleIdx,
+                             int64_t RuleID) {
+  const auto &Num = "rule number " + Twine{RuleNum};
+  const auto &ID = RuleID >= 0 ? "ID " + Twine{RuleID} : "";
+  const auto &Idx = "MatchTable index " + Twine{RuleIdx};
+  return (Num + "\t" + ID + "\t" + Idx).str();
+}
+
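+// Whether to skip the rule per the testgen filtering options. For example
+// (illustrative): `-testgen-from-rule 100 -testgen-until-rule 200
+// -testgen-exclude-rules 150,151` keeps rules 100 through 200 except 150 and
+// 151, while `-testgen-include-only 42` generates a test for rule 42 alone
+// and ignores every other filtering option.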
+static bool skipRule(unsigned RuleNum, uint64_t CurrentIdx,
+                     int64_t RuleID = -1) {
+  const int64_t ID = TestgenFilterByRuleNum ? RuleNum : RuleID;
+  if (ID < 0)
+    return false;
+
+  static SmallDenseSet<int64_t, 8> ExplicitlyListedRules;
+  if (ExplicitlyListedRules.empty())
+    for (unsigned Val : TestgenIncludeOnly)
+      ExplicitlyListedRules.insert(Val);
+
+  if (!ExplicitlyListedRules.empty())
+    return !ExplicitlyListedRules.count(ID);
+
+  if (ID < TestgenFromRule || ID > TestgenUntilRule)
+    return true;
+
+  static SmallDenseSet<int64_t, 8> ExcludedRules;
+  if (ExcludedRules.empty())
+    for (unsigned Val : TestgenExcludeRules)
+      ExcludedRules.insert(Val);
+  return ExcludedRules.count(ID);
+}
+
+static void
+orderInstructions(MachineIRBuilder &MIRBuilder,
+                  const SmallVectorImpl<MachineInstrBuilder> &InsnBuilders) {
+  MachineFunction &MF = MIRBuilder.getMF();
+  const MachineRegisterInfo &MRI = MF.getRegInfo();
+  SmallPtrSet<const MachineInstr *, 16> Visited;
+  const std::function<void(const MachineInstrBuilder &)> Visit =
+      [&](const MachineInstrBuilder &MIB) {
+        if (!Visited.insert(MIB).second)
+          return;
+        for (unsigned I = 0, E = MIB->getNumOperands(); I != E; ++I) {
+          const MachineOperand &MO = MIB->getOperand(E - I - 1);
+          if (MO.isReg() && MO.isUse() && MO.getReg())
+            if (MachineInstr *DefMI = MRI.getVRegDef(MO.getReg()))
+              Visit({MF, DefMI});
+        }
+        MIRBuilder.getMBB().splice(MIRBuilder.getInsertPt(), MIB->getParent(),
+                                   MIB);
+      };
+  if (!InsnBuilders.empty())
+    Visit(InsnBuilders.front());
+}
+
+void InstructionSelectorTestgen::generateTestCase(
+    uint64_t CurrentIdx, InputRegAllocator &RA,
+    MachineIRBuilder &MIRBuilder) const {
+
+  SmallVector<MachineInstrBuilder, 8> InsnBuilders;
+  initInsnBuilders(getMatchTable(), CurrentIdx, MIRBuilder, InsnBuilders);
+
+  mainPass(*this, CurrentIdx, MIRBuilder, InsnBuilders);
+
+  MachineFunction &MF = MIRBuilder.getMF();
+  latePass(getMatchTable(), CurrentIdx, MF, InsnBuilders);
+
+  orderInstructions(MIRBuilder, InsnBuilders);
+
+  // We need all the banks in place so we could speculate on the ABI better:
+  DEBUG(
+      dbgs() << "** Assigning RegBanks for VRegs unconstrained by the rule\n");
+  assignRegBanksToUnconstrainedVRegs(RA.getSize2RegBanks(), MF);
+
+  // assignRegBanksToUnconstrainedVRegs relies on the input vregs being
+  // undefined, so run defineUndefinedVRegs later, not before:
+  DEBUG(dbgs() << "** Defining VRegs undefined by the rule\n");
+  defineUndefinedVRegs(getTII(), getTRI(), getRBI(), RA, MIRBuilder);
+
+  // Finalize the MBB with a non-generic terminator that uses the root vreg:
+  InstructionSelectorTestgen::emitReturnIntoTestCase(MIRBuilder);
+
+  MF.getProperties().set(MachineFunctionProperties::Property::Legalized);
+  MF.getProperties().set(MachineFunctionProperties::Property::RegBankSelected);
+}
+
+void InstructionSelectorTestgen::generateTestCasesImpl(
+    Module &M, MachineModuleInfo &MMI) const {
+
+  const MachineFunction *MF = MMI.getMachineFunction(*M.begin());
+  assert(MF && "Expected the Module / MMI to contain an init Machine Function");
+
+  const int64_t *MatchTable = getMatchTable();
+  unsigned RuleNum = 0;
+  uint64_t CurrentIdx = 0;
+  uint64_t CurrentDoneIdx = ~0ULL;
+  int64_t RuleID = -1;
+  using RuleSig = std::tuple<unsigned, uint64_t, int64_t>;
+  RuleBodyTy RuleBody;
+
+  DenseMap<RuleBodyTy, SmallVector<RuleSig, 2>> RuleDuplicates;
+  InputRegAllocator RA(getTRI(), getRBI(), *MF);
+
+  while (MatchTable[CurrentIdx] != GIM_Reject) {
+    std::tie(CurrentDoneIdx, RuleID, RuleBody) =
+        getDoneIdxRuleIDAndBody(MatchTable, CurrentIdx);
+
+    if (!skipRule(RuleNum, CurrentIdx, RuleID)) {
+      const auto &Name = buildName(RuleNum, CurrentIdx, RuleID);
+      if (TestgenQuery)
+        dbgs() << Name << "\t" << buildDesc(RuleNum, CurrentIdx, RuleID);
+
+      SmallVectorImpl<RuleSig> &Dups = RuleDuplicates[RuleBody];
+      Dups.emplace_back(RuleNum, CurrentIdx, RuleID);
+      // The major source of literal duplicates is the fact that we map MVTs
+      // like i32 and f32 to the same s32 LLT, so two or more patterns
+      // originally written for SelectionDAG ISel get imported as the exact
+      // same sequence of semantically meaningful match table opcodes, both
+      // matching and rendering ones:
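+      // For instance (illustrative): a load-of-i32 pattern and a load-of-f32
+      // pattern both import as a G_LOAD of s32, so only one test case is
+      // generated for the pair.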
+static void
+orderInstructions(MachineIRBuilder &MIRBuilder,
+                  const SmallVectorImpl<MachineInstrBuilder> &InsnBuilders) {
+  MachineFunction &MF = MIRBuilder.getMF();
+  const MachineRegisterInfo &MRI = MF.getRegInfo();
+  SmallPtrSet<MachineInstr *, 16> Visited;
+  // Depth-first walk over the def chains: splice every def ahead of its
+  // users so the test case reads top-down.
+  const std::function<void(const MachineInstrBuilder &)> Visit =
+      [&](const MachineInstrBuilder &MIB) {
+        if (!Visited.insert(MIB).second)
+          return;
+        for (unsigned I = 0, E = MIB->getNumOperands(); I != E; ++I) {
+          const MachineOperand &MO = MIB->getOperand(E - I - 1);
+          if (MO.isReg() && MO.isUse() && MO.getReg())
+            if (MachineInstr *DefMI = MRI.getVRegDef(MO.getReg()))
+              Visit({MF, DefMI});
+        }
+        MIRBuilder.getMBB().splice(MIRBuilder.getInsertPt(), MIB->getParent(),
+                                   MIB);
+      };
+  if (!InsnBuilders.empty())
+    Visit(InsnBuilders.front());
+}
+
+void InstructionSelectorTestgen::generateTestCase(
+    uint64_t CurrentIdx, InputRegAllocator &RA,
+    MachineIRBuilder &MIRBuilder) const {
+
+  SmallVector<MachineInstrBuilder, 8> InsnBuilders;
+  initInsnBuilders(getMatchTable(), CurrentIdx, MIRBuilder, InsnBuilders);
+
+  mainPass(*this, CurrentIdx, MIRBuilder, InsnBuilders);
+
+  MachineFunction &MF = MIRBuilder.getMF();
+  latePass(getMatchTable(), CurrentIdx, MF, InsnBuilders);
+
+  orderInstructions(MIRBuilder, InsnBuilders);
+
+  // We need all the register banks in place so we can make better guesses
+  // about the ABI:
+  DEBUG(dbgs()
+        << "** Assigning RegBanks for VRegs unconstrained by the rule\n");
+  assignRegBanksToUnconstrainedVRegs(RA.getSize2RegBanks(), MF);
+
+  // assignRegBanksToUnconstrainedVRegs relies on the input vregs being
+  // undefined, so run defineUndefinedVRegs after it, not before:
+  DEBUG(dbgs() << "** Defining VRegs undefined by the rule\n");
+  defineUndefinedVRegs(getTII(), getTRI(), getRBI(), RA, MIRBuilder);
+
+  // Finalize the MBB with a non-generic terminator that uses the root vreg:
+  InstructionSelectorTestgen::emitReturnIntoTestCase(MIRBuilder);
+
+  MF.getProperties().set(MachineFunctionProperties::Property::Legalized);
+  MF.getProperties().set(MachineFunctionProperties::Property::RegBankSelected);
+}
+
+void InstructionSelectorTestgen::generateTestCasesImpl(
+    Module &M, MachineModuleInfo &MMI) const {
+
+  const MachineFunction *MF = MMI.getMachineFunction(*M.begin());
+  assert(MF && "Expected the Module / MMI to contain an initial "
+               "MachineFunction");
+
+  const int64_t *MatchTable = getMatchTable();
+  unsigned RuleNum = 0;
+  uint64_t CurrentIdx = 0;
+  uint64_t CurrentDoneIdx = ~0ULL;
+  int64_t RuleID = -1;
+  using RuleSig = std::tuple<unsigned, uint64_t, int64_t>;
+  RuleBodyTy RuleBody;
+
+  DenseMap<RuleBodyTy, SmallVector<RuleSig, 2>> RuleDuplicates;
+  InputRegAllocator RA(getTRI(), getRBI(), *MF);
+
+  while (MatchTable[CurrentIdx] != GIM_Reject) {
+    std::tie(CurrentDoneIdx, RuleID, RuleBody) =
+        getDoneIdxRuleIDAndBody(MatchTable, CurrentIdx);
+
+    if (!skipRule(RuleNum, CurrentIdx, RuleID)) {
+      const auto &Name = buildName(RuleNum, CurrentIdx, RuleID);
+      if (TestgenQuery)
+        dbgs() << Name << "\t" << buildDesc(RuleNum, CurrentIdx, RuleID);
+
+      SmallVectorImpl<RuleSig> &Dups = RuleDuplicates[RuleBody];
+      Dups.emplace_back(RuleNum, CurrentIdx, RuleID);
+      // The major source of literal duplicates is that we map MVTs like i32
+      // and f32 to the same s32 LLT, so two or more patterns originally
+      // written for SelectionDAG ISel get imported as exactly the same
+      // sequence of semantically meaningful match table opcodes, matching
+      // and rendering opcodes alike:
+      if (Dups.size() > 1) {
+        const auto &DupName =
+            buildName(std::get<0>(Dups.front()), std::get<1>(Dups.front()),
+                      std::get<2>(Dups.front()));
+        if (TestgenQuery)
+          dbgs() << "\tduplicate of " << DupName;
+        else
+          DEBUG(dbgs() << "*** Skipping test case " << Name
+                       << " as a duplicate of " << DupName << "\n");
+      } else if (!TestgenQuery) {
+        DEBUG(dbgs() << "\n*** Generating test case " << Name << " ***\n");
+        MachineIRBuilder MIRBuilder =
+            InstructionSelectorTestgen::createEmptyTestCase(Name, M, MMI);
+        generateTestCase(CurrentIdx, RA.reset(), MIRBuilder);
+        // Drop the test case again if it doesn't pass the machine verifier:
+        if (!MIRBuilder.getMF().verify(nullptr, Name.c_str(), false)) {
+          Function *F = M.getFunction(Name);
+          MMI.deleteMachineFunctionFor(*F);
+          F->eraseFromParent();
+        }
+      }
+    }
+    ++RuleNum;
+    CurrentIdx = nextGIOpcodeIdx(MatchTable, CurrentDoneIdx);
+
+    if (TestgenQuery)
+      dbgs() << "\n";
+  }
+}
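
How this is driven by the `InstructionSelectTestgen` module pass lies outside this excerpt. Given the virtual `getTestgen()` hook and the `generateTestCases()` entry point, the pass body plausibly reduces to something like the following sketch (reaching the selector through `TargetSubtargetInfo::getInstructionSelector` is an assumption, as is the omitted error handling):

    bool InstructionSelectTestgen::runOnModule(Module &M) {
      MachineModuleInfo &MMI = getAnalysis<MachineModuleInfo>();
      // Any machine function suffices to reach the subtarget's selector.
      MachineFunction *MF = MMI.getMachineFunction(*M.begin());
      const InstructionSelector *ISel =
          MF->getSubtarget().getInstructionSelector();
      ISel->getTestgen()->generateTestCases(M, MMI);
      return true; // A new MachineFunction is created per generated test.
    }
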
Index: test/TableGen/GlobalISelEmitter.td
===================================================================
--- test/TableGen/GlobalISelEmitter.td
+++ test/TableGen/GlobalISelEmitter.td
@@ -79,6 +79,22 @@
 // CHECK-NEXT:   bool testImmPredicate_APInt(unsigned PredicateID, const APInt &Imm) const override;
 // CHECK-NEXT:   bool testImmPredicate_APFloat(unsigned PredicateID, const APFloat &Imm) const override;
 // CHECK-NEXT:   const int64_t *getMatchTable() const override;
+// CHECK-NEXT:   std::unique_ptr<InstructionSelectorTestgen> getTestgen() const override;
+//
+// CHECK: class Testgen : public InstructionSelectorTestgen {
+// CHECK-NEXT: public:
+// CHECK-NEXT:   Testgen(const MyTargetInstructionSelector &ISel) : ISel(ISel) {}
+// CHECK-NEXT:   void generateTestCases(Module &M, MachineModuleInfo &MMI) const override;
+// CHECK-NEXT:   bool checkFeatures(unsigned FeatureBitsetID) const override;
+// CHECK-NEXT:   LLT getTypeObject(unsigned TypeObjectID) const override;
+// CHECK-NEXT:   const TargetInstrInfo &getTII() const override;
+// CHECK-NEXT:   const TargetRegisterInfo &getTRI() const override;
+// CHECK-NEXT:   const RegisterBankInfo &getRBI() const override;
+// CHECK-NEXT:   const int64_t *getMatchTable() const override;
+//
+// CHECK: private:
+// CHECK-NEXT:   const MyTargetInstructionSelector &ISel;
+// CHECK-NEXT: };
 // CHECK-NEXT: #endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL

 // CHECK-LABEL: #ifdef GET_GLOBALISEL_TEMPORARIES_INIT
Index: utils/TableGen/GlobalISelEmitter.cpp
===================================================================
--- utils/TableGen/GlobalISelEmitter.cpp
+++ utils/TableGen/GlobalISelEmitter.cpp
@@ -3926,6 +3926,24 @@
      << "  bool testImmPredicate_APFloat(unsigned PredicateID, const APFloat "
         "&Imm) const override;\n"
      << "  const int64_t *getMatchTable() const override;\n"
+     << "  std::unique_ptr<InstructionSelectorTestgen> "
+        "getTestgen() const override;\n"
+     << "\n"
+     << "  class Testgen : public InstructionSelectorTestgen {\n"
+     << "  public:\n"
+     << "    Testgen(const " << Target.getName()
+     << "InstructionSelector &ISel) : ISel(ISel) {}\n"
+     << "    void "
+        "generateTestCases(Module &M, MachineModuleInfo &MMI) const override;\n"
+     << "    bool checkFeatures(unsigned FeatureBitsetID) const override;\n"
+     << "    LLT getTypeObject(unsigned TypeObjectID) const override;\n"
+     << "    const TargetInstrInfo &getTII() const override;\n"
+     << "    const TargetRegisterInfo &getTRI() const override;\n"
+     << "    const RegisterBankInfo &getRBI() const override;\n"
+     << "    const int64_t *getMatchTable() const override;\n\n"
+     << "  private:\n"
+     << "    const " << Target.getName() << "InstructionSelector &ISel;\n"
+     << "  };\n"
      << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n\n";

   OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n"
@@ -4115,6 +4133,62 @@
      << "    return false;\n"
      << "}\n\n";

+  OS << "std::unique_ptr<InstructionSelectorTestgen>\n"
+     << Target.getName() << "InstructionSelector::getTestgen() const {\n"
+     << "  return llvm::make_unique<Testgen>(*this);\n"
+     << "}\n\n"
+
+     << "void " << Target.getName()
+     << "InstructionSelector::Testgen::generateTestCases(Module &M"
+        ", MachineModuleInfo &MMI) const {\n"
+     << "  MachineFunction *MF = MMI.getMachineFunction(*M.rbegin());\n"
+     << "  ISel.AvailableFunctionFeatures = "
+        "ISel.computeAvailableFunctionFeatures(&ISel.STI, MF);\n"
+     << "  generateTestCasesImpl(M, MMI);\n"
+     << "}\n\n"
+
+     << "bool " << Target.getName()
+     << "InstructionSelector::Testgen::checkFeatures(unsigned FeatureBitsetID) "
+        "const {\n"
+     << "  const PredicateBitset &ExpectedFeatures = "
+        "ISel.ISelInfo.FeatureBitsets[FeatureBitsetID];\n"
+     << "  const PredicateBitset &AvailableFeatures = "
+        "ISel.getAvailableFeatures();\n"
+     << "  return (ExpectedFeatures & AvailableFeatures) == ExpectedFeatures;\n"
+     << "}\n\n"
+
+     << "const TargetInstrInfo &" << Target.getName()
+     << "InstructionSelector::Testgen::getTII() const {\n"
+     << "  return ISel.TII;\n"
+     << "}\n\n"
+
+     << "const TargetRegisterInfo &" << Target.getName()
+     << "InstructionSelector::Testgen::getTRI() const {\n"
+     << "  return ISel.TRI;\n"
+     << "}\n\n"
+
+     << "const RegisterBankInfo &" << Target.getName()
+     << "InstructionSelector::Testgen::getRBI() const {\n"
+     << "  return ISel.RBI;\n"
+     << "}\n\n"
+
+     << "LLT " << Target.getName()
+     << "InstructionSelector::Testgen::getTypeObject(unsigned TypeObjectID) "
+        "const {\n"
+     << "  return ISel.ISelInfo.TypeObjects[TypeObjectID];\n"
+     << "}\n\n";
+
+  // Testgen needs an unoptimized match table with coverage markers to walk
+  // rule by rule; reuse the selector's own table only if it already is one.
+  OS << "const int64_t *" << Target.getName()
+     << "InstructionSelector::Testgen::getMatchTable() const {\n";
+  if (OptimizeMatchTable || !GenerateCoverage) {
+    const MatchTable VanillaTable = buildMatchTable(Rules, false, true);
+    VanillaTable.emitDeclaration(OS);
+    OS << "  return ";
+    VanillaTable.emitUse(OS);
+  } else
+    OS << "  return ISel.getMatchTable()";
+  OS << ";\n}\n\n";
+
   const MatchTable Table =
       buildMatchTable(Rules, OptimizeMatchTable, GenerateCoverage);
   OS << "const int64_t *" << Target.getName()
@@ -4129,6 +4203,8 @@
      << "PredicateBitset AvailableModuleFeatures;\n"
      << "mutable PredicateBitset AvailableFunctionFeatures;\n"
      << "PredicateBitset getAvailableFeatures() const {\n"
+     << "  if (TestgenSetAllFeatures)\n"
+     << "    return PredicateBitset().set();\n"
      << "  return AvailableModuleFeatures | AvailableFunctionFeatures;\n"
      << "}\n"
      << "PredicateBitset\n"
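
To make the emitted boilerplate concrete: for the MyTarget selector exercised by the test above, the generated definitions come out approximately as follows (a direct instantiation of the emitter strings, with Target.getName() == "MyTarget"):

    std::unique_ptr<InstructionSelectorTestgen>
    MyTargetInstructionSelector::getTestgen() const {
      return llvm::make_unique<Testgen>(*this);
    }

    void MyTargetInstructionSelector::Testgen::generateTestCases(
        Module &M, MachineModuleInfo &MMI) const {
      MachineFunction *MF = MMI.getMachineFunction(*M.rbegin());
      ISel.AvailableFunctionFeatures =
          ISel.computeAvailableFunctionFeatures(&ISel.STI, MF);
      generateTestCasesImpl(M, MMI);
    }
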
rules" + exit 2 +fi + +printf "%s" "$(dirname $0)/update_instruction_select_testgen_tests.sh \ + $(dirname $(dirname $0))/test/CodeGen/@TARGET-NAME@/GlobalISel/@TEST-NAME@-testgen-testgend.mir \ + $llc $triple $extra_args -testgen-exclude-rules=" + +found=0 +for I in `seq 0 $last_rule_number`; do + llc_command="$llc /dev/null -x mir -o /dev/null -mtriple $triple \ + -run-pass instruction-select-testgen -testgen-set-all-features \ + -run-pass instruction-select -disable-gisel-legality-check \ + -verify-machineinstrs -simplify-mir $extra_args -testgen-include-only=" + if ! $(${llc_command}${I} -testgen-filter-by-rule-number 2> /dev/null); then + if [ $found -gt 0 ]; then + last_failed_command="$llc_command" + printf "," + fi + last_failed_rule_id=$(echo "$rules_list" | \ + grep -io "rule number $I\tID \d\+" | cut -f 2 | cut -d' ' -f 2) + printf "%d" $last_failed_rule_id + found=$((found + 1)) + fi +done +printf "\n" + +if [ $found -gt 0 ]; then + echo + echo "# Try this to reproduce the last failure:" + echo ${last_failed_command}${last_failed_rule_id} -print-after-all +fi Index: utils/update_instruction_select_testgen_tests.sh =================================================================== --- /dev/null +++ utils/update_instruction_select_testgen_tests.sh @@ -0,0 +1,63 @@ +#!/bin/bash -e + +testgend="$1" +llc="$2" +triple="$3" +extra_args="${@:4}" + +if [ -z "$testgend" -o -z "$llc" -o -z "$triple" ]; then + echo "usage: $0 [extra llc args]" + exit 1 +fi + +testgend_nomir="${testgend%.mir}" +selected="${testgend_nomir%?testgend}-selected.mir" +touch "$selected" + +testgen_command="$llc $selected -mtriple $triple -testgen-set-all-features \ + -run-pass instruction-select-testgen $extra_args \ + -verify-machineinstrs -simplify-mir -o -" + +text="$($testgen_command 2> /dev/null | perl -pe 's/\s+$/\n/')" + +first_test_line_number=$(echo "$text" | grep -n 'name:\s*test_' \ + | head -n 1 | cut -d: -f 1) +if [ -z "$first_test_line_number" ]; then + echo "Couldn't generate any tests by running the following testgen command:" + echo "$testgen_command" + exit 2 +fi +tests=$(echo "$text" | tail +$((first_test_line_number - 1))) + +echo "# NOTE: This test has been autogenerated by utils/$(basename $0)" > "$testgend" +echo "# RUN: llc -mtriple $triple -run-pass instruction-select-testgen -testgen-set-all-features \\" >> "$testgend" +if [ -n "$extra_args" ]; then + echo "# RUN: $extra_args \\" >> "$testgend" +fi +echo "# RUN: -verify-machineinstrs %S/$(basename $selected) \\" >> "$testgend" +echo "# RUN: -simplify-mir -o - 2>&1 | FileCheck %s --check-prefix=TESTGEND --match-full-lines" >> "$testgend" +echo "#" >> "$testgend" +echo "$tests" \ + | perl -pe 's/^(name:\s*test_\w+)/# TESTGEND-LABEL: \1/' \ + | perl -pe 's/^([^#\n])/# TESTGEND: \1/' \ + | perl -pe 's/^\n$/#\n/' >> "$testgend" + +echo "# RUN: llc -mtriple $triple -run-pass instruction-select \\" > "$selected" +echo "# RUN: -testgen-set-all-features -disable-gisel-legality-check \\" >> "$selected" +echo "# RUN: -verify-machineinstrs %s -o - 2>&1 \\" >> "$selected" +echo "# RUN: | FileCheck %s --check-prefix=SELECTED" >> "$selected" +echo "#" >> "$selected" +echo "# Test if this file is in sync with the current state of the selector:" >> "$selected" +echo "$text" >> "$selected" + +$(dirname $0)/update_mir_test_checks.py --llc-binary="$llc" "$selected" + +sed -i '' -e "/^# Test if this file is in sync with the current state of the selector:$/ a\\ + # RUN: cat %s | FileCheck --check-prefix=TESTGEND --match-full-lines \\\\\\ + # RUN: 
Index: utils/update_instruction_select_testgen_tests.sh
===================================================================
--- /dev/null
+++ utils/update_instruction_select_testgen_tests.sh
@@ -0,0 +1,63 @@
+#!/bin/bash -e
+
+testgend="$1"
+llc="$2"
+triple="$3"
+extra_args="${@:4}"
+
+if [ -z "$testgend" -o -z "$llc" -o -z "$triple" ]; then
+  echo "usage: $0 <testgend test> <llc> <triple> [extra llc args]"
+  exit 1
+fi
+
+# Derive the companion -selected.mir test name from the -testgend.mir one.
+testgend_nomir="${testgend%.mir}"
+selected="${testgend_nomir%?testgend}-selected.mir"
+touch "$selected"
+
+testgen_command="$llc $selected -mtriple $triple -testgen-set-all-features \
+  -run-pass instruction-select-testgen $extra_args \
+  -verify-machineinstrs -simplify-mir -o -"
+
+text="$($testgen_command 2> /dev/null | perl -pe 's/\s+$/\n/')"
+
+first_test_line_number=$(echo "$text" | grep -n 'name:[[:space:]]*test_' \
+  | head -n 1 | cut -d: -f 1)
+if [ -z "$first_test_line_number" ]; then
+  echo "Couldn't generate any tests by running the following testgen command:"
+  echo "$testgen_command"
+  exit 2
+fi
+tests=$(echo "$text" | tail -n +$((first_test_line_number - 1)))
+
+echo "# NOTE: This test has been autogenerated by utils/$(basename $0)" > "$testgend"
+echo "# RUN: llc -mtriple $triple -run-pass instruction-select-testgen -testgen-set-all-features \\" >> "$testgend"
+if [ -n "$extra_args" ]; then
+  echo "# RUN:   $extra_args \\" >> "$testgend"
+fi
+echo "# RUN:   -verify-machineinstrs %S/$(basename $selected) \\" >> "$testgend"
+echo "# RUN:   -simplify-mir -o - 2>&1 | FileCheck %s --check-prefix=TESTGEND --match-full-lines" >> "$testgend"
+echo "#" >> "$testgend"
+echo "$tests" \
+  | perl -pe 's/^(name:\s*test_\w+)/# TESTGEND-LABEL: \1/' \
+  | perl -pe 's/^([^#\n])/# TESTGEND: \1/' \
+  | perl -pe 's/^\n$/#\n/' >> "$testgend"
+
+echo "# RUN: llc -mtriple $triple -run-pass instruction-select \\" > "$selected"
+echo "# RUN:   -testgen-set-all-features -disable-gisel-legality-check \\" >> "$selected"
+echo "# RUN:   -verify-machineinstrs %s -o - 2>&1 \\" >> "$selected"
+echo "# RUN:   | FileCheck %s --check-prefix=SELECTED" >> "$selected"
+echo "#" >> "$selected"
+echo "# Test if this file is in sync with the current state of the selector:" >> "$selected"
+echo "$text" >> "$selected"
+
+$(dirname $0)/update_mir_test_checks.py --llc-binary="$llc" "$selected"
+
+# Note: BSD/macOS sed syntax; GNU sed takes -i without the '' argument.
+sed -i '' -e "/^# Test if this file is in sync with the current state of the selector:$/ a\\
+# RUN: cat %s | FileCheck --check-prefix=TESTGEND --match-full-lines \\\\\\
+# RUN:   %S/$(basename $testgend)
+" "$selected"
+
+sed -i '' -e "1 i\\
+# NOTE: This test has been autogenerated by utils/$(basename $0)\\
+#
+" "$selected"
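
A matching hypothetical use of the updater (the test name, llc path, and excluded rule IDs are made up): it regenerates both the -testgend.mir file, holding TESTGEND check lines for the testgen output, and the companion -selected.mir file, whose SELECTED check lines are produced by update_mir_test_checks.py:

    $ utils/update_instruction_select_testgen_tests.sh \
        test/CodeGen/AArch64/GlobalISel/select-testgen-testgend.mir \
        build/bin/llc aarch64-- -testgen-exclude-rules=12,97
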