diff --git a/llvm/test/tools/llvm-exegesis/X86/uops-VFMADDSS4rm.s b/llvm/test/tools/llvm-exegesis/X86/uops-VFMADDSS4rm.s
--- a/llvm/test/tools/llvm-exegesis/X86/uops-VFMADDSS4rm.s
+++ b/llvm/test/tools/llvm-exegesis/X86/uops-VFMADDSS4rm.s
@@ -5,3 +5,6 @@
 CHECK-NEXT: key:
 CHECK-NEXT:   instructions:
 CHECK-NEXT:     VFMADDSS4rm
+CHECK: register_initial_values:
+# FIXME: This will be changed to CHECK by a follow-up patch that adds MXCSR modeling to VFMADDSS.
+CHECK-NOT: MXCSR
diff --git a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
--- a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
+++ b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
@@ -439,6 +439,8 @@
 
   std::vector<MCInst> popFlagAndFinalize();
 
+  std::vector<MCInst> loadMXCSRAndFinalize(bool HasAVX);
+
 private:
   ConstantInliner &add(const MCInst &Inst) {
     Instructions.push_back(Inst);
@@ -499,6 +501,19 @@
   return std::move(Instructions);
 }
 
+std::vector<MCInst> ConstantInliner::loadMXCSRAndFinalize(bool HasAVX) {
+  add(allocateStackSpace(4));
+  add(fillStackSpace(X86::MOV32mi, 0, 0x1f80)); // Mask all FP exceptions
+  add(MCInstBuilder(HasAVX ? X86::VLDMXCSR : X86::LDMXCSR)
+          // Address = ESP
+          .addReg(X86::RSP) // BaseReg
+          .addImm(1)        // ScaleAmt
+          .addReg(0)        // IndexReg
+          .addImm(0)        // Disp
+          .addReg(0));      // Segment
+  return std::move(Instructions);
+}
+
 void ConstantInliner::initStack(unsigned Bytes) {
   assert(Constant_.getBitWidth() <= Bytes * 8 &&
          "Value does not have the correct size");
@@ -699,6 +714,8 @@
   }
   if (Reg == X86::EFLAGS)
     return CI.popFlagAndFinalize();
+  if (Reg == X86::MXCSR)
+    return CI.loadMXCSRAndFinalize(STI.getFeatureBits()[X86::FeatureAVX]);
   return {}; // Not yet implemented.
 }
 
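For reference, a minimal sketch of the preamble loadMXCSRAndFinalize is expected to produce on a non-AVX target (illustrative only; the exact sequence comes from the MCInsts built above, and vldmxcsr is emitted instead of ldmxcsr when FeatureAVX is set):

    subq    $4, %rsp          # allocateStackSpace(4)
    movl    $0x1f80, (%rsp)   # 0x1f80 = x86 default MXCSR: all FP exceptions masked, round-to-nearest
    ldmxcsr (%rsp)            # load MXCSR from the stack slot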