Index: llvm/trunk/tools/llvm-exegesis/lib/AArch64/Target.cpp =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/AArch64/Target.cpp +++ llvm/trunk/tools/llvm-exegesis/lib/AArch64/Target.cpp @@ -9,6 +9,7 @@ #include "../Target.h" #include "../Latency.h" #include "AArch64.h" +#include "AArch64RegisterInfo.h" namespace exegesis { @@ -26,33 +27,51 @@ } }; -class ExegesisAArch64Target : public ExegesisTarget { - std::vector setRegTo(const llvm::MCSubtargetInfo &STI, - const llvm::APInt &Value, - unsigned Reg) const override { - llvm_unreachable("Not yet implemented"); - } +namespace { - unsigned getScratchMemoryRegister(const llvm::Triple &) const override { - llvm_unreachable("Not yet implemented"); +static unsigned getLoadImmediateOpcode(unsigned RegBitWidth) { + switch (RegBitWidth) { + case 32: + return llvm::AArch64::MOVi32imm; + case 64: + return llvm::AArch64::MOVi64imm; } + llvm_unreachable("Invalid Value Width"); +} - void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg, - unsigned Offset) const override { - llvm_unreachable("Not yet implemented"); - } +// Generates instruction to load an immediate value into a register. 
+static llvm::MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth, + const llvm::APInt &Value) { + if (Value.getBitWidth() > RegBitWidth) + llvm_unreachable("Value must fit in the Register"); + return llvm::MCInstBuilder(getLoadImmediateOpcode(RegBitWidth)) + .addReg(Reg) + .addImm(Value.getZExtValue()); +} - unsigned getMaxMemoryAccessSize() const override { - llvm_unreachable("Not yet implemented"); +} // namespace + +class ExegesisAArch64Target : public ExegesisTarget { + std::vector setRegTo(const llvm::MCSubtargetInfo &STI, + unsigned Reg, + const llvm::APInt &Value) const override { + if (llvm::AArch64::GPR32RegClass.contains(Reg)) + return {loadImmediate(Reg, 32, Value)}; + if (llvm::AArch64::GPR64RegClass.contains(Reg)) + return {loadImmediate(Reg, 64, Value)}; + llvm::errs() << "setRegTo is not implemented, results will be unreliable\n"; + return {}; } bool matchesArch(llvm::Triple::ArchType Arch) const override { return Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be; } + void addTargetSpecificPasses(llvm::PassManagerBase &PM) const override { // Function return is a pseudo-instruction that needs to be expanded PM.add(llvm::createAArch64ExpandPseudoPass()); } + std::unique_ptr createLatencyBenchmarkRunner(const LLVMState &State) const override { return llvm::make_unique(State); Index: llvm/trunk/tools/llvm-exegesis/lib/Assembler.h =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/Assembler.h +++ llvm/trunk/tools/llvm-exegesis/lib/Assembler.h @@ -39,6 +39,12 @@ // convention and target machine). llvm::BitVector getFunctionReservedRegs(const llvm::TargetMachine &TM); +// A simple object storing the value for a particular register. +struct RegisterValue { + unsigned Register; + llvm::APInt Value; +}; + // Creates a temporary `void foo(char*)` function containing the provided // Instructions. Runs a set of llvm Passes to provide correct prologue and // epilogue. 
Once the MachineFunction is ready, it is assembled for TM to @@ -46,7 +52,7 @@ void assembleToStream(const ExegesisTarget &ET, std::unique_ptr TM, llvm::ArrayRef LiveIns, - llvm::ArrayRef RegsToDef, + llvm::ArrayRef RegisterInitialValues, llvm::ArrayRef Instructions, llvm::raw_pwrite_stream &AsmStream); Index: llvm/trunk/tools/llvm-exegesis/lib/Assembler.cpp =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/Assembler.cpp +++ llvm/trunk/tools/llvm-exegesis/lib/Assembler.cpp @@ -29,18 +29,18 @@ static constexpr const char FunctionID[] = "foo"; static std::vector -generateSnippetSetupCode(const llvm::ArrayRef RegsToDef, - const ExegesisTarget &ET, - const llvm::LLVMTargetMachine &TM, bool &IsComplete) { - IsComplete = true; +generateSnippetSetupCode(const ExegesisTarget &ET, + const llvm::MCSubtargetInfo *const MSI, + llvm::ArrayRef RegisterInitialValues, + bool &IsSnippetSetupComplete) { std::vector Result; - // for (const unsigned Reg : RegsToDef) { - // // Load a constant in the register. - // const auto Code = ET.setRegToConstant(*TM.getMCSubtargetInfo(), Reg); - // if (Code.empty()) - // IsComplete = false; - // Result.insert(Result.end(), Code.begin(), Code.end()); - // } + for (const RegisterValue &RV : RegisterInitialValues) { + // Load a constant in the register. 
+ const auto SetRegisterCode = ET.setRegTo(*MSI, RV.Register, RV.Value); + if (SetRegisterCode.empty()) + IsSnippetSetupComplete = false; + Result.insert(Result.end(), SetRegisterCode.begin(), SetRegisterCode.end()); + } return Result; } @@ -149,7 +149,7 @@ void assembleToStream(const ExegesisTarget &ET, std::unique_ptr TM, llvm::ArrayRef LiveIns, - llvm::ArrayRef RegsToDef, + llvm::ArrayRef RegisterInitialValues, llvm::ArrayRef Instructions, llvm::raw_pwrite_stream &AsmStream) { std::unique_ptr Context = @@ -171,13 +171,12 @@ MF.getRegInfo().addLiveIn(Reg); bool IsSnippetSetupComplete = false; - std::vector SnippetWithSetup = - generateSnippetSetupCode(RegsToDef, ET, *TM, IsSnippetSetupComplete); - if (!SnippetWithSetup.empty()) { - SnippetWithSetup.insert(SnippetWithSetup.end(), Instructions.begin(), - Instructions.end()); - Instructions = SnippetWithSetup; - } + std::vector Code = + generateSnippetSetupCode(ET, TM->getMCSubtargetInfo(), + RegisterInitialValues, IsSnippetSetupComplete); + + Code.insert(Code.end(), Instructions.begin(), Instructions.end()); + // If the snippet setup is not complete, we disable liveliness tracking. This // means that we won't know what values are in the registers. if (!IsSnippetSetupComplete) @@ -188,7 +187,7 @@ MF.getRegInfo().freezeReservedRegs(MF); // Fill the MachineFunction from the instructions. - fillMachineFunction(MF, LiveIns, Instructions); + fillMachineFunction(MF, LiveIns, Code); // We create the pass manager, run the passes to populate AsmBuffer. llvm::MCContext &MCContext = MMI->getContext(); Index: llvm/trunk/tools/llvm-exegesis/lib/BenchmarkCode.h =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/BenchmarkCode.h +++ llvm/trunk/tools/llvm-exegesis/lib/BenchmarkCode.h @@ -23,7 +23,7 @@ // Before the code is executed some instructions are added to setup the // registers initial values. 
- std::vector RegsToDef; + std::vector RegisterInitialValues; // We also need to provide the registers that are live on entry for the // assembler to generate proper prologue/epilogue. Index: llvm/trunk/tools/llvm-exegesis/lib/BenchmarkRunner.cpp =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/BenchmarkRunner.cpp +++ llvm/trunk/tools/llvm-exegesis/lib/BenchmarkRunner.cpp @@ -104,7 +104,7 @@ return std::move(E); llvm::raw_fd_ostream OFS(ResultFD, true /*ShouldClose*/); assembleToStream(State.getExegesisTarget(), State.createTargetMachine(), - BC.LiveIns, BC.RegsToDef, Code, OFS); + BC.LiveIns, BC.RegisterInitialValues, Code, OFS); return ResultPath.str(); } Index: llvm/trunk/tools/llvm-exegesis/lib/SnippetGenerator.h =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/SnippetGenerator.h +++ llvm/trunk/tools/llvm-exegesis/lib/SnippetGenerator.h @@ -48,8 +48,8 @@ generateConfigurations(unsigned Opcode) const; // Given a snippet, computes which registers the setup code needs to define. 
- std::vector - computeRegsToDef(const std::vector &Snippet) const; + std::vector computeRegisterInitialValues( + const std::vector &Snippet) const; protected: const LLVMState &State; Index: llvm/trunk/tools/llvm-exegesis/lib/SnippetGenerator.cpp =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/SnippetGenerator.cpp +++ llvm/trunk/tools/llvm-exegesis/lib/SnippetGenerator.cpp @@ -49,7 +49,7 @@ } if (CT.ScratchSpacePointerInReg) BC.LiveIns.push_back(CT.ScratchSpacePointerInReg); - BC.RegsToDef = computeRegsToDef(CT.Instructions); + BC.RegisterInitialValues = computeRegisterInitialValues(CT.Instructions); Output.push_back(std::move(BC)); } return Output; @@ -57,14 +57,14 @@ return E.takeError(); } -std::vector SnippetGenerator::computeRegsToDef( +std::vector SnippetGenerator::computeRegisterInitialValues( const std::vector &Instructions) const { // Collect all register uses and create an assignment for each of them. // Ignore memory operands which are handled separately. // Loop invariant: DefinedRegs[i] is true iif it has been set at least once // before the current instruction. llvm::BitVector DefinedRegs = RATC.emptyRegisters(); - std::vector RegsToDef; + std::vector RIV; for (const InstructionBuilder &IB : Instructions) { // Returns the register that this Operand sets or uses, or 0 if this is not // a register. 
@@ -82,7 +82,7 @@ if (!Op.IsDef) { const unsigned Reg = GetOpReg(Op); if (Reg > 0 && !DefinedRegs.test(Reg)) { - RegsToDef.push_back(Reg); + RIV.push_back(RegisterValue{Reg, llvm::APInt()}); DefinedRegs.set(Reg); } } @@ -96,7 +96,7 @@ } } } - return RegsToDef; + return RIV; } llvm::Expected SnippetGenerator::generateSelfAliasingCodeTemplate( Index: llvm/trunk/tools/llvm-exegesis/lib/Target.h =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/Target.h +++ llvm/trunk/tools/llvm-exegesis/lib/Target.h @@ -36,25 +36,31 @@ virtual void addTargetSpecificPasses(llvm::PassManagerBase &PM) const {} // Generates code to move a constant into a the given register. - virtual std::vector setRegTo(const llvm::MCSubtargetInfo &STI, - const llvm::APInt &Value, - unsigned Reg) const = 0; + // Precondition: Value must fit into Reg. + virtual std::vector + setRegTo(const llvm::MCSubtargetInfo &STI, unsigned Reg, + const llvm::APInt &Value) const = 0; // Returns the register pointing to scratch memory, or 0 if this target // does not support memory operands. The benchmark function uses the // default calling convention. - virtual unsigned getScratchMemoryRegister(const llvm::Triple &) const = 0; + virtual unsigned getScratchMemoryRegister(const llvm::Triple &) const { + return 0; + } // Fills memory operands with references to the address at [Reg] + Offset. virtual void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg, - unsigned Offset) const = 0; + unsigned Offset) const { + llvm_unreachable( + "fillMemoryOperands() requires getScratchMemoryRegister() > 0"); + } // Returns the maximum number of bytes a load/store instruction can access at // once. This is typically the size of the largest register available on the // processor. Note that this only used as a hint to generate independant // load/stores to/from memory, so the exact returned value does not really // matter as long as it's large enough. 
- virtual unsigned getMaxMemoryAccessSize() const = 0; + virtual unsigned getMaxMemoryAccessSize() const { return 0; } // Creates a snippet generator for the given mode. std::unique_ptr Index: llvm/trunk/tools/llvm-exegesis/lib/Target.cpp =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/Target.cpp +++ llvm/trunk/tools/llvm-exegesis/lib/Target.cpp @@ -90,21 +90,8 @@ class ExegesisDefaultTarget : public ExegesisTarget { private: std::vector setRegTo(const llvm::MCSubtargetInfo &STI, - const llvm::APInt &Value, - unsigned Reg) const override { - llvm_unreachable("Not yet implemented"); - } - - unsigned getScratchMemoryRegister(const llvm::Triple &) const override { - llvm_unreachable("Not yet implemented"); - } - - void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg, - unsigned Offset) const override { - llvm_unreachable("Not yet implemented"); - } - - unsigned getMaxMemoryAccessSize() const override { + unsigned Reg, + const llvm::APInt &Value) const override { llvm_unreachable("Not yet implemented"); } Index: llvm/trunk/tools/llvm-exegesis/lib/X86/Target.cpp =================================================================== --- llvm/trunk/tools/llvm-exegesis/lib/X86/Target.cpp +++ llvm/trunk/tools/llvm-exegesis/lib/X86/Target.cpp @@ -101,8 +101,8 @@ } }; -static unsigned GetLoadImmediateOpcode(const llvm::APInt &Value) { - switch (Value.getBitWidth()) { +static unsigned GetLoadImmediateOpcode(unsigned RegBitWidth) { + switch (RegBitWidth) { case 8: return llvm::X86::MOV8ri; case 16: @@ -115,10 +115,12 @@ llvm_unreachable("Invalid Value Width"); } -static llvm::MCInst loadImmediate(unsigned Reg, const llvm::APInt &Value, - unsigned MaxBitWidth) { - assert(Value.getBitWidth() <= MaxBitWidth && "Value too big to fit register"); - return llvm::MCInstBuilder(GetLoadImmediateOpcode(Value)) +// Generates instruction to load an immediate value into a register. 
+static llvm::MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth, + const llvm::APInt &Value) { + if (Value.getBitWidth() > RegBitWidth) + llvm_unreachable("Value must fit in the Register"); + return llvm::MCInstBuilder(GetLoadImmediateOpcode(RegBitWidth)) .addReg(Reg) .addImm(Value.getZExtValue()); } @@ -165,6 +167,8 @@ .addImm(Bytes); } +// Reserves some space on the stack, fills it with the content of the provided +// constant and provide methods to load the stack value into a register. struct ConstantInliner { explicit ConstantInliner(const llvm::APInt &Constant) : StackSize(Constant.getBitWidth() / 8) { @@ -187,17 +191,19 @@ Constant.extractBits(8, ByteOffset * 8).getZExtValue())); } - std::vector loadAndFinalize(unsigned Reg, unsigned Opcode, - unsigned BitWidth) { - assert(StackSize * 8 == BitWidth && "Value does not have the correct size"); + std::vector loadAndFinalize(unsigned Reg, unsigned RegBitWidth, + unsigned Opcode) { + assert(StackSize * 8 == RegBitWidth && + "Value does not have the correct size"); add(loadToReg(Reg, Opcode)); add(releaseStackSpace(StackSize)); return std::move(Instructions); } - std::vector loadX87AndFinalize(unsigned Reg, unsigned Opcode, - unsigned BitWidth) { - assert(StackSize * 8 == BitWidth && "Value does not have the correct size"); + std::vector + loadX87AndFinalize(unsigned Reg, unsigned RegBitWidth, unsigned Opcode) { + assert(StackSize * 8 == RegBitWidth && + "Value does not have the correct size"); add(llvm::MCInstBuilder(Opcode) .addReg(llvm::X86::RSP) // BaseReg .addImm(1) // ScaleAmt @@ -211,7 +217,7 @@ } std::vector popFlagAndFinalize() { - assert(StackSize * 8 == 32 && "Value does not have the correct size"); + assert(StackSize * 8 == 64 && "Value does not have the correct size"); add(llvm::MCInstBuilder(llvm::X86::POPF64)); return std::move(Instructions); } @@ -275,46 +281,46 @@ } std::vector setRegTo(const llvm::MCSubtargetInfo &STI, - const llvm::APInt &Value, - unsigned Reg) const override { + unsigned 
Reg, + const llvm::APInt &Value) const override { if (llvm::X86::GR8RegClass.contains(Reg)) - return {loadImmediate(Reg, Value, 8)}; + return {loadImmediate(Reg, 8, Value)}; if (llvm::X86::GR16RegClass.contains(Reg)) - return {loadImmediate(Reg, Value, 16)}; + return {loadImmediate(Reg, 16, Value)}; if (llvm::X86::GR32RegClass.contains(Reg)) - return {loadImmediate(Reg, Value, 32)}; + return {loadImmediate(Reg, 32, Value)}; if (llvm::X86::GR64RegClass.contains(Reg)) - return {loadImmediate(Reg, Value, 64)}; + return {loadImmediate(Reg, 64, Value)}; ConstantInliner CI(Value); if (llvm::X86::VR64RegClass.contains(Reg)) - return CI.loadAndFinalize(Reg, llvm::X86::MMX_MOVQ64rm, 64); + return CI.loadAndFinalize(Reg, 64, llvm::X86::MMX_MOVQ64rm); if (llvm::X86::VR128XRegClass.contains(Reg)) { if (STI.getFeatureBits()[llvm::X86::FeatureAVX512]) - return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQU32Z128rm, 128); + return CI.loadAndFinalize(Reg, 128, llvm::X86::VMOVDQU32Z128rm); if (STI.getFeatureBits()[llvm::X86::FeatureAVX]) - return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQUrm, 128); - return CI.loadAndFinalize(Reg, llvm::X86::MOVDQUrm, 128); + return CI.loadAndFinalize(Reg, 128, llvm::X86::VMOVDQUrm); + return CI.loadAndFinalize(Reg, 128, llvm::X86::MOVDQUrm); } if (llvm::X86::VR256XRegClass.contains(Reg)) { if (STI.getFeatureBits()[llvm::X86::FeatureAVX512]) - return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQU32Z256rm, 256); + return CI.loadAndFinalize(Reg, 256, llvm::X86::VMOVDQU32Z256rm); if (STI.getFeatureBits()[llvm::X86::FeatureAVX]) - return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQUYrm, 256); + return CI.loadAndFinalize(Reg, 256, llvm::X86::VMOVDQUYrm); } if (llvm::X86::VR512RegClass.contains(Reg)) if (STI.getFeatureBits()[llvm::X86::FeatureAVX512]) - return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQU32Zrm, 512); + return CI.loadAndFinalize(Reg, 512, llvm::X86::VMOVDQU32Zrm); if (llvm::X86::RSTRegClass.contains(Reg)) { if (Value.getBitWidth() == 32) - return 
CI.loadX87AndFinalize(Reg, llvm::X86::LD_F32m, 32); + return CI.loadX87AndFinalize(Reg, 32, llvm::X86::LD_F32m); if (Value.getBitWidth() == 64) - return CI.loadX87AndFinalize(Reg, llvm::X86::LD_F64m, 64); + return CI.loadX87AndFinalize(Reg, 64, llvm::X86::LD_F64m); if (Value.getBitWidth() == 80) - return CI.loadX87AndFinalize(Reg, llvm::X86::LD_F80m, 80); + return CI.loadX87AndFinalize(Reg, 80, llvm::X86::LD_F80m); } if (Reg == llvm::X86::EFLAGS) return CI.popFlagAndFinalize(); - llvm_unreachable("Not yet implemented"); + return {}; // Not yet implemented. } std::unique_ptr Index: llvm/trunk/unittests/tools/llvm-exegesis/AArch64/TargetTest.cpp =================================================================== --- llvm/trunk/unittests/tools/llvm-exegesis/AArch64/TargetTest.cpp +++ llvm/trunk/unittests/tools/llvm-exegesis/AArch64/TargetTest.cpp @@ -15,11 +15,16 @@ namespace { +using llvm::APInt; +using llvm::MCInst; using testing::Gt; +using testing::IsEmpty; +using testing::Not; using testing::NotNull; -using testing::SizeIs; constexpr const char kTriple[] = "aarch64-unknown-linux"; +constexpr const char kGenericCpu[] = "generic"; +constexpr const char kNoFeatures[] = ""; class AArch64TargetTest : public ::testing::Test { protected: @@ -29,7 +34,10 @@ std::string error; Target_ = llvm::TargetRegistry::lookupTarget(kTriple, error); EXPECT_THAT(Target_, NotNull()); + STI_.reset( + Target_->createMCSubtargetInfo(kTriple, kGenericCpu, kNoFeatures)); } + static void SetUpTestCase() { LLVMInitializeAArch64TargetInfo(); LLVMInitializeAArch64Target(); @@ -37,9 +45,20 @@ InitializeAArch64ExegesisTarget(); } + std::vector setRegTo(unsigned Reg, const APInt &Value) { + return ExegesisTarget_->setRegTo(*STI_, Reg, Value); + } + const llvm::Target *Target_; const ExegesisTarget *const ExegesisTarget_; + std::unique_ptr STI_; }; +TEST_F(AArch64TargetTest, SetRegToConstant) { + // The AArch64 target can now materialize GPR values, so setRegTo must + // produce a non-empty instruction sequence. 
+ const auto Insts = setRegTo(llvm::AArch64::X0, llvm::APInt()); + EXPECT_THAT(Insts, Not(IsEmpty())); +} + } // namespace } // namespace exegesis Index: llvm/trunk/unittests/tools/llvm-exegesis/ARM/AssemblerTest.cpp =================================================================== --- llvm/trunk/unittests/tools/llvm-exegesis/ARM/AssemblerTest.cpp +++ llvm/trunk/unittests/tools/llvm-exegesis/ARM/AssemblerTest.cpp @@ -30,12 +30,11 @@ }; TEST_F(ARMMachineFunctionGeneratorTest, DISABLED_JitFunction) { - Check(ExegesisTarget::getDefault(), {}, llvm::MCInst(), 0x1e, 0xff, 0x2f, - 0xe1); + Check({}, llvm::MCInst(), 0x1e, 0xff, 0x2f, 0xe1); } TEST_F(ARMMachineFunctionGeneratorTest, DISABLED_JitFunctionADDrr) { - Check(ExegesisTarget::getDefault(), {llvm::ARM::R0}, + Check({{llvm::ARM::R0, llvm::APInt()}}, MCInstBuilder(llvm::ARM::ADDrr) .addReg(llvm::ARM::R0) .addReg(llvm::ARM::R0) Index: llvm/trunk/unittests/tools/llvm-exegesis/Common/AssemblerUtils.h =================================================================== --- llvm/trunk/unittests/tools/llvm-exegesis/Common/AssemblerUtils.h +++ llvm/trunk/unittests/tools/llvm-exegesis/Common/AssemblerUtils.h @@ -32,7 +32,9 @@ const std::string &CpuName) : TT(TT), CpuName(CpuName), CanExecute(llvm::Triple(TT).getArch() == - llvm::Triple(llvm::sys::getProcessTriple()).getArch()) { + llvm::Triple(llvm::sys::getProcessTriple()).getArch()), + ET(ExegesisTarget::lookup(llvm::Triple(TT))) { + assert(ET); if (!CanExecute) { llvm::outs() << "Skipping execution, host:" << llvm::sys::getProcessTriple() << ", target:" << TT @@ -41,12 +43,12 @@ } template - inline void Check(const ExegesisTarget &ET, - llvm::ArrayRef RegsToDef, llvm::MCInst MCInst, - Bs... Bytes) { + inline void Check(llvm::ArrayRef RegisterInitialValues, + llvm::MCInst MCInst, Bs... Bytes) { ExecutableFunction Function = - (MCInst.getOpcode() == 0) ? assembleToFunction(ET, RegsToDef, {}) - : assembleToFunction(ET, RegsToDef, {MCInst}); + (MCInst.getOpcode() == 0) + ? 
assembleToFunction(RegisterInitialValues, {}) + : assembleToFunction(RegisterInitialValues, {MCInst}); ASSERT_THAT(Function.getFunctionBytes().str(), testing::ElementsAre(Bytes...)); if (CanExecute) { @@ -70,14 +72,12 @@ } ExecutableFunction - assembleToFunction(const ExegesisTarget &ET, - llvm::ArrayRef RegsToDef, + assembleToFunction(llvm::ArrayRef RegisterInitialValues, llvm::ArrayRef Instructions) { llvm::SmallString<256> Buffer; llvm::raw_svector_ostream AsmStream(Buffer); - assembleToStream(ET, createTargetMachine(), /*LiveIns=*/{}, - RegsToDef, Instructions, - AsmStream); + assembleToStream(*ET, createTargetMachine(), /*LiveIns=*/{}, + RegisterInitialValues, Instructions, AsmStream); return ExecutableFunction(createTargetMachine(), getObjectFromBuffer(AsmStream.str())); } @@ -85,6 +85,7 @@ const std::string TT; const std::string CpuName; const bool CanExecute; + const ExegesisTarget *const ET; }; } // namespace exegesis Index: llvm/trunk/unittests/tools/llvm-exegesis/X86/AssemblerTest.cpp =================================================================== --- llvm/trunk/unittests/tools/llvm-exegesis/X86/AssemblerTest.cpp +++ llvm/trunk/unittests/tools/llvm-exegesis/X86/AssemblerTest.cpp @@ -39,19 +39,12 @@ }; TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunction) { - Check(ExegesisTarget::getDefault(), {}, llvm::MCInst(), 0xc3); -} - -TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionXOR32rr_Default) { - Check(ExegesisTarget::getDefault(), {EAX}, - MCInstBuilder(XOR32rr).addReg(EAX).addReg(EAX).addReg(EAX), 0x31, 0xc0, - 0xc3); + Check({}, llvm::MCInst(), 0xc3); } TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionXOR32rr_X86) { - const auto *ET = ExegesisTarget::lookup(llvm::Triple("x86_64-unknown-linux")); - ASSERT_NE(ET, nullptr); - Check(*ET, {EAX}, MCInstBuilder(XOR32rr).addReg(EAX).addReg(EAX).addReg(EAX), + Check({{EAX, llvm::APInt(32, 1)}}, + MCInstBuilder(XOR32rr).addReg(EAX).addReg(EAX).addReg(EAX), // mov eax, 1 0xb8, 
0x01, 0x00, 0x00, 0x00, // xor eax, eax @@ -59,15 +52,13 @@ } TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionMOV64ri) { - Check(ExegesisTarget::getDefault(), {}, - MCInstBuilder(MOV64ri32).addReg(RAX).addImm(42), 0x48, 0xc7, 0xc0, 0x2a, - 0x00, 0x00, 0x00, 0xc3); + Check({}, MCInstBuilder(MOV64ri32).addReg(RAX).addImm(42), 0x48, 0xc7, 0xc0, + 0x2a, 0x00, 0x00, 0x00, 0xc3); } TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionMOV32ri) { - Check(ExegesisTarget::getDefault(), {}, - MCInstBuilder(MOV32ri).addReg(EAX).addImm(42), 0xb8, 0x2a, 0x00, 0x00, - 0x00, 0xc3); + Check({}, MCInstBuilder(MOV32ri).addReg(EAX).addImm(42), 0xb8, 0x2a, 0x00, + 0x00, 0x00, 0xc3); } } // namespace Index: llvm/trunk/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp =================================================================== --- llvm/trunk/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp +++ llvm/trunk/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp @@ -261,7 +261,13 @@ using FakeSnippetGeneratorTest = SnippetGeneratorTest; -TEST_F(FakeSnippetGeneratorTest, ComputeRegsToDefAdd16ri) { +testing::Matcher IsRegisterValue(unsigned Reg, + llvm::APInt Value) { + return testing::AllOf(testing::Field(&RegisterValue::Register, Reg), + testing::Field(&RegisterValue::Value, Value)); +} + +TEST_F(FakeSnippetGeneratorTest, ComputeRegisterInitialValuesAdd16ri) { // ADD16ri: // explicit def 0 : reg RegClass=GR16 // explicit use 1 : reg RegClass=GR16 | TIED_TO:0 @@ -272,11 +278,11 @@ llvm::MCOperand::createReg(llvm::X86::AX); std::vector Snippet; Snippet.push_back(std::move(IB)); - const auto RegsToDef = Generator.computeRegsToDef(Snippet); - EXPECT_THAT(RegsToDef, UnorderedElementsAre(llvm::X86::AX)); + const auto RIV = Generator.computeRegisterInitialValues(Snippet); + EXPECT_THAT(RIV, ElementsAre(IsRegisterValue(llvm::X86::AX, llvm::APInt()))); } -TEST_F(FakeSnippetGeneratorTest, ComputeRegsToDefAdd64rr) { +TEST_F(FakeSnippetGeneratorTest, 
ComputeRegisterInitialValuesAdd64rr) { // ADD64rr: // mov64ri rax, 42 // add64rr rax, rax, rbx @@ -298,8 +304,8 @@ Snippet.push_back(std::move(Add)); } - const auto RegsToDef = Generator.computeRegsToDef(Snippet); - EXPECT_THAT(RegsToDef, UnorderedElementsAre(llvm::X86::RBX)); + const auto RIV = Generator.computeRegisterInitialValues(Snippet); + EXPECT_THAT(RIV, ElementsAre(IsRegisterValue(llvm::X86::RBX, llvm::APInt()))); } } // namespace Index: llvm/trunk/unittests/tools/llvm-exegesis/X86/TargetTest.cpp =================================================================== --- llvm/trunk/unittests/tools/llvm-exegesis/X86/TargetTest.cpp +++ llvm/trunk/unittests/tools/llvm-exegesis/X86/TargetTest.cpp @@ -125,7 +125,7 @@ } std::vector setRegTo(unsigned Reg, const APInt &Value) { - return ExegesisTarget_->setRegTo(*STI_, Value, Reg); + return ExegesisTarget_->setRegTo(*STI_, Reg, Value); } const llvm::Target *Target_; @@ -137,6 +137,16 @@ using Core2AvxTargetTest = X86TargetTest; using Core2Avx512TargetTest = X86TargetTest; +TEST_F(Core2TargetTest, SetFlags) { + const unsigned Reg = llvm::X86::EFLAGS; + EXPECT_THAT( + setRegTo(Reg, APInt(64, 0x1111222233334444ULL)), + ElementsAre(IsStackAllocate(8), + IsMovValueToStack(llvm::X86::MOV32mi, 0x33334444UL, 0), + IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 4), + OpcodeIs(llvm::X86::POPF64))); +} + TEST_F(Core2TargetTest, SetRegToGR8Value) { const uint8_t Value = 0xFFU; const unsigned Reg = llvm::X86::AL; @@ -285,7 +295,7 @@ setRegTo(llvm::X86::ST0, APInt(32, 0x11112222ULL)), ElementsAre(IsStackAllocate(4), IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 0), - testing::A(), IsStackDeallocate(4))); + OpcodeIs(llvm::X86::LD_F32m), IsStackDeallocate(4))); } TEST_F(Core2TargetTest, SetRegToST1_32Bits) { @@ -295,7 +305,8 @@ setRegTo(llvm::X86::ST1, APInt(32, 0x11112222ULL)), ElementsAre(IsStackAllocate(4), IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 0), - testing::A(), CopySt0ToSt1, IsStackDeallocate(4))); + 
OpcodeIs(llvm::X86::LD_F32m), CopySt0ToSt1, + IsStackDeallocate(4))); } TEST_F(Core2TargetTest, SetRegToST0_64Bits) { @@ -304,7 +315,7 @@ ElementsAre(IsStackAllocate(8), IsMovValueToStack(llvm::X86::MOV32mi, 0x33334444UL, 0), IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 4), - testing::A(), IsStackDeallocate(8))); + OpcodeIs(llvm::X86::LD_F64m), IsStackDeallocate(8))); } TEST_F(Core2TargetTest, SetRegToST0_80Bits) { @@ -314,7 +325,7 @@ IsMovValueToStack(llvm::X86::MOV32mi, 0x44445555UL, 0), IsMovValueToStack(llvm::X86::MOV32mi, 0x22223333UL, 4), IsMovValueToStack(llvm::X86::MOV16mi, 0x1111UL, 8), - testing::A(), IsStackDeallocate(10))); + OpcodeIs(llvm::X86::LD_F80m), IsStackDeallocate(10))); } } // namespace