Index: llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
===================================================================
--- llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -938,7 +938,7 @@
 
   // Load Value Injection (LVI) Mitigations for machine code
   void emitWarningForSpecialLVIInstruction(SMLoc Loc);
-  bool applyLVICFIMitigation(MCInst &Inst);
+  bool applyLVICFIMitigation(MCInst &Inst, MCStreamer &Out);
   bool applyLVILoadHardeningMitigation(MCInst &Inst, MCStreamer &Out);
 
   /// Wrapper around MCStreamer::emitInstruction(). Possibly adds
@@ -3178,7 +3178,7 @@
 /// - https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
 ///
 /// Returns `true` if a mitigation was applied or warning was emitted.
-bool X86AsmParser::applyLVICFIMitigation(MCInst &Inst) {
+bool X86AsmParser::applyLVICFIMitigation(MCInst &Inst, MCStreamer &Out) {
   // Information on control-flow instructions that require manual mitigation can
   // be found here:
   // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
@@ -3188,7 +3188,24 @@
   case X86::RETQ:
   case X86::RETIL:
   case X86::RETIQ:
-  case X86::RETIW:
+  case X86::RETIW: {
+    MCInst ShlInst, FenceInst;
+    bool Parse32 = is32BitMode() || Code16GCC;
+    unsigned Basereg =
+        is64BitMode() ? X86::RSP : (Parse32 ? X86::ESP : X86::SP);
+    const MCExpr *Disp = MCConstantExpr::create(0, getContext());
+    auto ShlMemOp = X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
+                                          /*BaseReg=*/Basereg, /*IndexReg=*/0,
+                                          /*Scale=*/1, SMLoc{}, SMLoc{}, 0);
+    ShlInst.setOpcode(X86::SHL64mi);
+    ShlMemOp->addMemOperands(ShlInst, 5);
+    ShlInst.addOperand(MCOperand::createImm(0));
+    FenceInst.setOpcode(X86::LFENCE);
+    Out.emitInstruction(ShlInst, getSTI());
+    Out.emitInstruction(FenceInst, getSTI());
+    Out.emitInstruction(Inst, getSTI());
+    return true;
+  }
   case X86::JMP16m:
   case X86::JMP32m:
   case X86::JMP64m:
@@ -3196,6 +3213,7 @@
   case X86::CALL32m:
   case X86::CALL64m:
     emitWarningForSpecialLVIInstruction(Inst.getLoc());
+    Out.emitInstruction(Inst, getSTI());
     return true;
   }
   return false;
@@ -3226,12 +3244,14 @@
     case X86::SCASL:
     case X86::SCASQ:
       emitWarningForSpecialLVIInstruction(Inst.getLoc());
+      Out.emitInstruction(Inst, getSTI());
       return true;
     }
   } else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
     // If a REP instruction is found on its own line, it may or may not be
     // followed by a vulnerable instruction. Emit a warning just in case.
     emitWarningForSpecialLVIInstruction(Inst.getLoc());
+    Out.emitInstruction(Inst, getSTI());
     return true;
   }
 
@@ -3240,7 +3260,7 @@
   if (MCID.mayLoad() && Inst.getOpcode() != X86::LFENCE) {
     MCInst FenceInst;
     FenceInst.setOpcode(X86::LFENCE);
-    FenceInst.setLoc(Inst.getLoc());
+    Out.emitInstruction(Inst, getSTI());
     Out.emitInstruction(FenceInst, getSTI());
     return true;
   }
@@ -3249,15 +3269,16 @@
 
 void X86AsmParser::emitInstruction(MCInst &Inst, OperandVector &Operands,
                                    MCStreamer &Out) {
-  Out.emitInstruction(Inst, getSTI());
-
   if (LVIInlineAsmHardening) {
     if (getSTI().getFeatureBits()[X86::FeatureLVIControlFlowIntegrity] &&
-        applyLVICFIMitigation(Inst))
+        applyLVICFIMitigation(Inst, Out))
+      return;
+    if (getSTI().getFeatureBits()[X86::FeatureLVILoadHardening] &&
+        applyLVILoadHardeningMitigation(Inst, Out))
       return;
-    if (getSTI().getFeatureBits()[X86::FeatureLVILoadHardening])
-      applyLVILoadHardeningMitigation(Inst, Out);
   }
+
+  Out.emitInstruction(Inst, getSTI());
 }
 
 bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
Index: llvm/test/CodeGen/X86/lvi-hardening-inline-asm.ll
===================================================================
--- llvm/test/CodeGen/X86/lvi-hardening-inline-asm.ll
+++ llvm/test/CodeGen/X86/lvi-hardening-inline-asm.ll
@@ -5,10 +5,11 @@
 ; Test module-level assembly
 module asm "pop %rbx"
 module asm "ret"
-; WARN: warning: Instruction may be vulnerable to LVI
-; WARN-NEXT: ret
-; WARN-NEXT: ^
-; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+; X86: popq %rbx
+; X86-NEXT: lfence
+; X86-NEXT: shlq $0, (%rsp)
+; X86-NEXT: lfence
+; X86-NEXT: retq
 
 ; Function Attrs: noinline nounwind optnone uwtable
 define dso_local void @test_inline_asm() {
@@ -106,18 +107,14 @@
 ; X86: pinsrw $6, (%eax), %xmm0
 ; X86-NEXT: lfence
   call void asm sideeffect "ret", "~{dirflag},~{fpsr},~{flags}"() #1
-; WARN: warning: Instruction may be vulnerable to LVI
-; WARN-NEXT: ret
-; WARN-NEXT: ^
-; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
-; X86: retq
+; X86: shlq $0, (%rsp)
+; X86-NEXT: lfence
+; X86-NEXT: retq
 ; X86-NOT: lfence
   call void asm sideeffect "ret $$8", "~{dirflag},~{fpsr},~{flags}"() #1
-; WARN: warning: Instruction may be vulnerable to LVI
-; WARN-NEXT: ret $8
-; WARN-NEXT: ^
-; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
-; X86: retq $8
+; X86: shlq $0, (%rsp)
+; X86-NEXT: lfence
+; X86-NEXT: retq $8
 ; X86-NOT: lfence
   call void asm sideeffect "jmpq *(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
 ; WARN: warning: Instruction may be vulnerable to LVI