Index: llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
===================================================================
--- llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -930,6 +930,10 @@
   bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
   bool processInstruction(MCInst &Inst, const OperandVector &Ops);
 
+  // Load Value Injection (LVI) Mitigations for machine code
+  bool applyLVICFIMitigation(MCInst &Inst);
+  bool applyLVILoadHardeningMitigation(MCInst &Inst, MCStreamer &Out);
+
   /// Wrapper around MCStreamer::emitInstruction(). Possibly adds
   /// instrumentation around Inst.
   void emitInstruction(MCInst &Inst, OperandVector &Operands, MCStreamer &Out);
@@ -3149,9 +3153,91 @@
 
 static const char *getSubtargetFeatureName(uint64_t Val);
 
+/// RET instructions, as well as indirect calls and jumps through memory,
+/// combine a load and a branch within a single instruction. To mitigate these
+/// instructions against LVI, they must be decomposed into separate load and
+/// branch instructions, with an LFENCE in between. For more details, see:
+/// - X86LoadValueInjectionRetHardening.cpp
+/// - X86LoadValueInjectionIndirectThunks.cpp
+/// - https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
+///
+/// Returns `true` if a mitigation was applied or a warning was emitted.
+bool X86AsmParser::applyLVICFIMitigation(MCInst &Inst) {
+  switch (Inst.getOpcode()) {
+  case X86::RET:
+  case X86::RETL:
+  case X86::RETQ:
+  case X86::RETIL:
+  case X86::RETIQ:
+  case X86::RETIW:
+  case X86::JMP16m:
+  case X86::JMP32m:
+  case X86::JMP64m:
+  case X86::JMP64m_REX:
+  case X86::FARJMP16m:
+  case X86::FARJMP32m:
+  case X86::FARJMP64:
+  case X86::CALL16m:
+  case X86::CALL32m:
+  case X86::CALL64m:
+  case X86::FARCALL16m:
+  case X86::FARCALL32m:
+  case X86::FARCALL64:
+    Warning(Inst.getLoc(), "Instruction may be vulnerable to LVI and "
+                           "requires manual mitigation");
+    return true;
+  }
+  return false;
+}
+
+/// To mitigate LVI, every instruction that performs a load can be followed by
+/// an LFENCE instruction to squash any potential mis-speculation. Some
+/// instructions require additional consideration and may require manual
+/// mitigation. For more details, see:
+/// https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
+///
+/// Returns `true` if a mitigation was applied or a warning was emitted.
+bool X86AsmParser::applyLVILoadHardeningMitigation(MCInst &Inst,
+                                                   MCStreamer &Out) {
+  auto Opcode = Inst.getOpcode();
+  auto Flags = Inst.getFlags();
+  if ((Flags & X86::REP_PREFIX) || (Flags & X86::REPNE_PREFIX)) {
+    switch (Opcode) {
+    case X86::CMPSB:
+    case X86::CMPSW:
+    case X86::CMPSL:
+    case X86::CMPSQ:
+    case X86::SCASB:
+    case X86::SCASW:
+    case X86::SCASL:
+    case X86::SCASQ:
+      Warning(Inst.getLoc(), "Instruction may be vulnerable to LVI and "
+                             "requires manual mitigation");
+      return true;
+    }
+  }
+
+  const MCInstrDesc &MCID = MII.get(Opcode);
+  // LFENCE itself has the mayLoad property, so don't double fence.
+  if (MCID.mayLoad() && Opcode != X86::LFENCE) {
+    MCInst FenceInst;
+    FenceInst.setOpcode(X86::LFENCE);
+    FenceInst.setLoc(Inst.getLoc());
+    Out.emitInstruction(FenceInst, getSTI());
+    return true;
+  }
+  return false;
+}
+
 void X86AsmParser::emitInstruction(MCInst &Inst, OperandVector &Operands,
                                    MCStreamer &Out) {
   Out.emitInstruction(Inst, getSTI());
+
+  if (getSTI().getFeatureBits()[X86::FeatureLVIControlFlowIntegrity] &&
+      applyLVICFIMitigation(Inst))
+    return;
+  if (getSTI().getFeatureBits()[X86::FeatureLVILoadHardening])
+    applyLVILoadHardeningMitigation(Inst, Out);
 }
 
 bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
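Note for inline-asm authors: the manual mitigation that the warning asks for
is the decomposition described in the Intel deep dive linked from the doc
comment above: perform the load into a register, fence, then branch through
the register. A sketch for an indirect jump (the scratch register choice is
illustrative, not something this patch picks for you):

    # before: load and branch fused in one instruction, vulnerable to LVI
    jmpq *(%rdx)

    # after: separate load and branch, with an LFENCE in between
    movq (%rdx), %rdx
    lfence
    jmpq *%rdx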
Index: llvm/test/CodeGen/X86/lvi-hardening-inline-asm.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/lvi-hardening-inline-asm.ll
@@ -0,0 +1,135 @@
+; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown -mattr=+lvi-load-hardening -mattr=+lvi-cfi < %s -o %t.out 2> %t.err
+; RUN: FileCheck %s --check-prefix=X86 < %t.out
+; RUN: FileCheck %s --check-prefix=WARN < %t.err
+
+; Test module-level assembly
+module asm "pop %rbx"
+module asm "ret"
+; WARN: warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: ret
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local void @test_inline_asm() {
+entry:
+; X86-LABEL: test_inline_asm:
+  call void asm sideeffect "mov 0x3fed(%rip),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: movq 16365(%rip), %rax
+; X86-NEXT: lfence
+
+  call void asm sideeffect "movdqa 0x0(%rip),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: movdqa (%rip), %xmm0
+; X86-NEXT: lfence
+
+  call void asm sideeffect "movslq 0x3e5d(%rip),%rbx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: movslq 15965(%rip), %rbx
+; X86-NEXT: lfence
+
+  call void asm sideeffect "mov (%r12,%rax,8),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: movq (%r12,%rax,8), %rax
+; X86-NEXT: lfence
+
+  call void asm sideeffect "movq (24)(%rsi), %r11", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: movq 24(%rsi), %r11
+; X86-NEXT: lfence
+
+  call void asm sideeffect "cmove %r12,%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: cmoveq %r12, %rax
+; X86-NOT: lfence
+
+  call void asm sideeffect "cmove (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: cmoveq (%r12), %rax
+; X86-NEXT: lfence
+
+  call void asm sideeffect "pop %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: popq %rbx
+; X86-NEXT: lfence
+
+  call void asm sideeffect "popq %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: popq %rbx
+; X86-NEXT: lfence
+
+  call void asm sideeffect "xchg (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: xchgq %rax, (%r12)
+; X86-NEXT: lfence
+
+  call void asm sideeffect "cmpxchg %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: cmpxchgq %r12, (%rax)
+; X86-NEXT: lfence
+
+  call void asm sideeffect "vpxor (%rcx,%rdx,1),%ymm1,%ymm0", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: vpxor (%rcx,%rdx), %ymm1, %ymm0
+; X86-NEXT: lfence
+
+  call void asm sideeffect "vpmuludq 0x20(%rsi),%ymm0,%ymm12", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: vpmuludq 32(%rsi), %ymm0, %ymm12
+; X86-NEXT: lfence
+
+  call void asm sideeffect "vpexpandq 0x40(%rdi),%zmm8{%k2}{z}", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: vpexpandq 64(%rdi), %zmm8 {%k2} {z}
+; X86-NEXT: lfence
+
+  call void asm sideeffect "addq (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: addq (%r12), %rax
+; X86-NEXT: lfence
+
+  call void asm sideeffect "subq Lpoly+0(%rip), %rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: subq Lpoly+0(%rip), %rax
+; X86-NEXT: lfence
+
+  call void asm sideeffect "adcq %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: adcq %r12, (%rax)
+; X86-NEXT: lfence
+
+  call void asm sideeffect "negq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: negq (%rax)
+; X86-NEXT: lfence
+
+  call void asm sideeffect "incq %rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: incq %rax
+; X86-NOT: lfence
+
+  call void asm sideeffect "mulq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: mulq (%rax)
+; X86-NEXT: lfence
+
+  call void asm sideeffect "imulq (%rax),%rdx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: imulq (%rax), %rdx
+; X86-NEXT: lfence
+
+  call void asm sideeffect "shlq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: shlq (%rax)
+; X86-NEXT: lfence
+
+  call void asm sideeffect "shrq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: shrq (%rax)
+; X86-NEXT: lfence
+
+  call void asm sideeffect "repz cmpsb %es:(%rdi),%ds:(%rsi)", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN: warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: repz cmpsb %es:(%rdi),%ds:(%rsi)
+; X86: rep cmpsb %es:(%rdi), %ds:(%rsi)
+; X86-NOT: lfence
+
+  call void asm sideeffect "repnz scasb", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN: warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: repnz scasb
+; X86: repne scasb %es:(%rdi), %al
+; X86-NOT: lfence
+
+  call void asm sideeffect "pinsrw $$0x6,(%eax),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: pinsrw $6, (%eax), %xmm0
+; X86-NEXT: lfence
+
+  call void asm sideeffect "ret", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN: warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: ret
+; X86: retq
+; X86-NOT: lfence
+
+  call void asm sideeffect "ret $$8", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN: warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: ret $8
+; X86: retq $8
+; X86-NOT: lfence
+
+  call void asm sideeffect "jmpq *(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN: warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: jmpq *(%rdx)
+; X86: jmpq *(%rdx)
+; X86-NOT: lfence
+
+  call void asm sideeffect "jmpq *0x100(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN: warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: jmpq *0x100(%rdx)
+; X86: jmpq *256(%rdx)
+; X86-NOT: lfence
+
+  call void asm sideeffect "callq *200(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN: warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: callq *200(%rdx)
+; X86: callq *200(%rdx)
+; X86-NOT: lfence
+
+  call void asm sideeffect "fldt 0x8(%rbp)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: fldt 8(%rbp)
+; X86-NEXT: lfence
+
+  call void asm sideeffect "fld %st(0)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: fld %st(0)
+; X86-NOT: lfence
+
+; Test assembler macros
+  call void asm sideeffect ".macro mplus1 x\0Aincq (\5Cx)\0A.endm\0Amplus1 %rcx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86: incq (%rcx)
+; X86-NEXT: lfence
+  ret void
+}
+
+attributes #1 = { nounwind }
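Note: outside of lit, the same behavior can be reproduced by running the RUN
line's llc invocation by hand (the input file name here is a placeholder):

    llc -verify-machineinstrs -mtriple=x86_64-unknown \
        -mattr=+lvi-load-hardening -mattr=+lvi-cfi lvi-hardening-inline-asm.ll -o -

Every load in the inline assembly should be followed by an lfence in the
output, while the load+branch instructions (ret, jmpq/callq through memory,
and the REP-prefixed string compares and scans) should instead produce the
"Instruction may be vulnerable to LVI" warning on stderr.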