Index: include/llvm/Analysis/JumpInstrTableInfo.h =================================================================== --- include/llvm/Analysis/JumpInstrTableInfo.h +++ include/llvm/Analysis/JumpInstrTableInfo.h @@ -37,7 +37,9 @@ public: static char ID; - JumpInstrTableInfo(); + /// The default byte alignment for jump tables is 16, which is large but + /// usually safe. + JumpInstrTableInfo(uint64_t ByteAlign = 16); virtual ~JumpInstrTableInfo(); const char *getPassName() const override { return "Jump-Instruction Table Info"; @@ -52,9 +54,19 @@ /// Gets the tables. const JumpTables &getTables() const { return Tables; } + /// Gets the alignment in bytes of a jumptable entry. + uint64_t entryByteAlignment() const { return ByteAlignment; } private: JumpTables Tables; + + /// A power-of-two alignment of a jumptable entry. + uint64_t ByteAlignment; }; + +/// Creates a JumpInstrTableInfo pass with the given bound on entry size. This +/// bound specifies the maximum number of bytes needed to represent an +/// unconditional jump or a trap instruction in the back end currently in use. +ModulePass *createJumpInstrTableInfoPass(unsigned Bound); } #endif /* LLVM_ANALYSIS_JUMPINSTRTABLEINFO_H */
Index: include/llvm/CodeGen/CommandFlags.h =================================================================== --- include/llvm/CodeGen/CommandFlags.h +++ include/llvm/CodeGen/CommandFlags.h @@ -227,6 +227,44 @@ "Create one table per unique function type."), clEnumValEnd)); +cl::opt<bool> +FCFI("fcfi", + cl::desc("Apply forward-edge control-flow integrity"), + cl::init(false)); + +cl::opt<CFIntegrity> +CFIType("cfi-type", + cl::desc("Choose the type of Control-Flow Integrity check to add"), + cl::init(CFIntegrity::Sub), + cl::values( + clEnumValN(CFIntegrity::Sub, "sub", + "Subtract the pointer from the table base, then mask."), + clEnumValN(CFIntegrity::Ror, "ror", + "Use rotate to check the offset from a table base."), + clEnumValN(CFIntegrity::Add, "add", + "Mask out the high bits and add to an aligned base."), + clEnumValEnd)); + +cl::opt<bool> +CFIEnforcing("cfi-enforcing", + cl::desc("Enforce CFI or pass the violation to a function."), + cl::init(false)); + +// Note that this option is linked to the cfi-enforcing option above: if +// cfi-enforcing is set, then the cfi-func-name option is entirely ignored. If +// cfi-enforcing is false and no cfi-func-name is set, then a default function +// will be generated that ignores all CFI violations. The expected signature for +// functions called with CFI violations is +// +// void (i8*, i8*) +// +// The first pointer is a C string containing the name of the function in which +// the violation occurs, and the second pointer is the pointer that violated +// CFI. +cl::opt<std::string> +CFIFuncName("cfi-func-name", cl::desc("The name of the CFI function to call"), + cl::init("")); + // Common utility function tightly tied to the options listed here. Initializes // a TargetOptions object with CodeGen flags and returns it.
static inline TargetOptions InitTargetOptionsFromCodeGenFlags() { @@ -254,6 +292,10 @@ Options.MCOptions = InitMCTargetOptionsFromFlags(); Options.JTType = JTableType; + Options.FCFI = FCFI; + Options.CFIType = CFIType; + Options.CFIEnforcing = CFIEnforcing; + Options.CFIFuncName = CFIFuncName; Options.ThreadModel = TMModel;
Index: include/llvm/CodeGen/ForwardControlFlowIntegrity.h =================================================================== --- /dev/null +++ include/llvm/CodeGen/ForwardControlFlowIntegrity.h @@ -0,0 +1,123 @@ +//===-- ForwardControlFlowIntegrity.h: Forward-Edge CFI ---------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass instruments indirect calls with checks to ensure that these calls +// pass through the appropriate jump-instruction table generated by +// JumpInstrTables. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_FORWARDCONTROLFLOWINTEGRITY_H +#define LLVM_CODEGEN_FORWARDCONTROLFLOWINTEGRITY_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Pass.h" +#include "llvm/Target/TargetOptions.h" + +#include <string> + +namespace llvm { + +class AnalysisUsage; +class BasicBlock; +class Constant; +class Function; +class FunctionType; +class Instruction; +class Module; +class Value; + +/// ForwardControlFlowIntegrity uses the information from JumpInstrTableInfo to +/// prepend checks to indirect calls to make sure that these calls target valid +/// locations. +class ForwardControlFlowIntegrity : public ModulePass { +public: + static char ID; + + ForwardControlFlowIntegrity(); + ForwardControlFlowIntegrity(JumpTable::JumpTableType JTT, + CFIntegrity CFIType, + bool CFIEnforcing, std::string CFIFuncName); + virtual ~ForwardControlFlowIntegrity(); + + /// Runs the CFI pass on a given module. This works best if the module in + /// question is the result of link-time optimization (see lib/LTO). + bool runOnModule(Module &M) override; + const char *getPassName() const override { + return "Forward Control-Flow Integrity"; + } + void getAnalysisUsage(AnalysisUsage &AU) const override; + +private: + typedef SmallVector<Instruction *, 128> CallSet; + + /// A structure that is used to keep track of constant table information. + struct CFIConstants { + Constant *StartValue; + Constant *MaskValue; + Constant *Size; + }; + + /// A map from function type to the base of the table for this type and a mask + /// for the table. + typedef DenseMap<FunctionType *, CFIConstants> CFITables; + + CallSet IndirectCalls; + + /// The type of jumptable implementation. + JumpTable::JumpTableType JTType; + + /// The type of CFI check to add before each indirect call. + CFIntegrity CFIType; + + /// A value that controls whether or not CFI violations cause a halt. + bool CFIEnforcing; + + /// The name of the function to call in case of a CFI violation when + /// CFIEnforcing is false. There is a default function that ignores + /// violations. + std::string CFIFuncName; + + /// The alignment of each entry in the table, from JumpInstrTableInfo. The + /// JumpInstrTableInfo class always makes this a power of two. + uint64_t ByteAlignment; + + /// The base-2 logarithm of ByteAlignment, needed for some of the transforms + /// (like CFIntegrity::Ror). + unsigned LogByteAlignment; + + /// Adds checks to each indirect call site to make sure that it is calling a + /// function in our jump table.
+ void updateIndirectCalls(Module &M, CFITables &CFIT); + + /// Walks the instructions to find all the indirect calls. + void getIndirectCalls(Module &M); + + /// Adds a function that handles violations in non-enforcing mode + /// (!CFIEnforcing). The default warning function simply returns, since the + /// exact details of how to handle CFI violations depend on the application. + void addWarningFunction(Module &M); + + /// Rewrites a function pointer in a call/invoke instruction to force it into + /// a table. + void rewriteFunctionPointer(Module &M, Instruction *I, Value *FunPtr, + Constant *JumpTableStart, Constant *JumpTableMask, + Constant *JumpTableSize); + + /// Inserts a check and a call to a warning function at a given instruction + /// that must be an indirect call. + void insertWarning(Module &M, BasicBlock *Block, Instruction *I, + Value *FunPtr); +}; + +ModulePass * +createForwardControlFlowIntegrityPass(JumpTable::JumpTableType JTT, + CFIntegrity CFIType, + bool CFIEnforcing, StringRef CFIFuncName); +} + +#endif // LLVM_CODEGEN_FORWARDCONTROLFLOWINTEGRITY_H
Index: include/llvm/CodeGen/JumpInstrTables.h =================================================================== --- include/llvm/CodeGen/JumpInstrTables.h +++ include/llvm/CodeGen/JumpInstrTables.h @@ -39,13 +39,14 @@ /// jmp f_orig@PLT /// \endverbatim /// -/// Support for an architecture depends on two functions in TargetInstrInfo: -/// getUnconditionalBranch, and getTrap. AsmPrinter uses these to generate the -/// appropriate instructions for the jump statement (an unconditional branch) -/// and for padding to make the table have a size that is a power of two. This -/// padding uses a trap instruction to ensure that calls to this area halt the -/// program. The default implementations of these functions call -/// llvm_unreachable. +/// Support for an architecture depends on three functions in TargetInstrInfo: +/// getUnconditionalBranch, getTrap, and getJumpInstrTableEntryBound. AsmPrinter +/// uses these to generate the appropriate instructions for the jump statement +/// (an unconditional branch) and for padding to make the table have a size that +/// is a power of two. This padding uses a trap instruction to ensure that calls +/// to this area halt the program. The default implementations of these +/// functions call llvm_unreachable, except for getJumpInstrTableEntryBound, +/// which returns 0 by default. class JumpInstrTables : public ModulePass { public: static char ID; @@ -64,6 +65,14 @@ /// Checks to see if there is already a table for the given FunctionType. bool hasTable(FunctionType *FunTy); + /// Maps the function into a subset of function types, depending on the + /// jump-instruction table style selected from JumpTableTypes in + /// JumpInstrTables.cpp. The choice of mapping determines the number of + /// jump-instruction tables generated by this pass. E.g., the simplest mapping + /// converts every function type into void f(); so, all functions end up in a + /// single table. + static FunctionType *transformType(JumpTable::JumpTableType JTT, + FunctionType *FunTy); private: /// The metadata used while a jump table is being built struct TableMeta { @@ -76,14 +85,6 @@ typedef DenseMap<FunctionType *, struct TableMeta> JumpMap; - /// Maps the function into a subset of function types, depending on the - /// jump-instruction table style selected from JumpTableTypes in - /// JumpInstrTables.cpp. The choice of mapping determines the number of - /// jump-instruction tables generated by this pass.
E.g., the simplest mapping - /// converts every function type into void f(); so, all functions end up in a - /// single table. - FunctionType *transformType(FunctionType *FunTy); - /// The current state of functions and jump entries in the table(s). JumpMap Metadata; Index: include/llvm/CodeGen/Passes.h =================================================================== --- include/llvm/CodeGen/Passes.h +++ include/llvm/CodeGen/Passes.h @@ -602,6 +602,10 @@ /// createJumpInstrTables - This pass creates jump-instruction tables. ModulePass *createJumpInstrTablesPass(); + + /// createForwardControlFlowIntegrityPass - This pass adds control-flow + /// integrity. + ModulePass *createForwardControlFlowIntegrityPass(); } // End llvm namespace /// This initializer registers TargetMachine constructor, so the pass being Index: include/llvm/InitializePasses.h =================================================================== --- include/llvm/InitializePasses.h +++ include/llvm/InitializePasses.h @@ -91,6 +91,7 @@ void initializeCFGPrinterPass(PassRegistry&); void initializeCFGSimplifyPassPass(PassRegistry&); void initializeCFLAliasAnalysisPass(PassRegistry&); +void initializeForwardControlFlowIntegrityPass(PassRegistry&); void initializeFlattenCFGPassPass(PassRegistry&); void initializeStructurizeCFGPass(PassRegistry&); void initializeCFGViewerPass(PassRegistry&); Index: include/llvm/Target/TargetInstrInfo.h =================================================================== --- include/llvm/Target/TargetInstrInfo.h +++ include/llvm/Target/TargetInstrInfo.h @@ -428,6 +428,22 @@ llvm_unreachable("Target didn't implement TargetInstrInfo::getTrap!"); } + /// getJumpInstrTableEntryBound - Get a number of bytes that suffices to hold + /// either the instruction returned by getUnconditionalBranch or the + /// instruction returned by getTrap. This only makes sense because + /// getUnconditionalBranch returns a single, specific instruction. This + /// information is needed by the jumptable construction code, since it must + /// decide how many bytes to use for a jumptable entry so it can generate the + /// right mask. + virtual unsigned getJumpInstrTableEntryBound() const { + // This method gets called by LLVMTargetMachine always, so it can't fail + // just because there happens to be no implementation for this target. + // Any code that tries to use a jumptable annotation without defining + // getUnconditionalBranch on the appropriate Target will fail anyway, and + // the value returned here won't matter in that case. + return 0; + } + /// isLegalToSplitMBBAt - Return true if it's legal to split the given basic /// block at the specified instruction (i.e. instruction would be the start /// of a new basic block). Index: include/llvm/Target/TargetOptions.h =================================================================== --- include/llvm/Target/TargetOptions.h +++ include/llvm/Target/TargetOptions.h @@ -57,6 +57,14 @@ }; } + enum class CFIntegrity { + Sub, // Use subtraction-based checks. + Ror, // Use rotation-based checks. + Add // Use addition-based checks. This depends on having + // sufficient alignment in the code and is usually not + // feasible. 
+ }; + class TargetOptions { public: TargetOptions() @@ -70,10 +78,11 @@ EnableFastISel(false), PositionIndependentExecutable(false), UseInitArray(false), DisableIntegratedAS(false), CompressDebugSections(false), FunctionSections(false), - DataSections(false), TrapUnreachable(false), TrapFuncName(""), + DataSections(false), TrapUnreachable(false), TrapFuncName(), FloatABIType(FloatABI::Default), AllowFPOpFusion(FPOpFusion::Standard), JTType(JumpTable::Single), - ThreadModel(ThreadModel::POSIX) {} + FCFI(false), ThreadModel(ThreadModel::POSIX), + CFIType(CFIntegrity::Sub), CFIEnforcing(false), CFIFuncName() {} /// PrintMachineCode - This flag is enabled when the -print-machineinstrs /// option is specified on the command line, and should enable debugging @@ -228,10 +237,28 @@ /// create for functions that have the jumptable attribute. JumpTable::JumpTableType JTType; + /// FCFI - This flag controls whether or not forward-edge control-flow + /// integrity is applied. + bool FCFI; + /// ThreadModel - This flag specifies the type of threading model to assume /// for things like atomics ThreadModel::Model ThreadModel; + /// CFIType - This flag specifies the type of control-flow integrity check + /// to add as a preamble to indirect calls. + CFIntegrity CFIType; + + /// CFIEnforcing - This flag controls whether or not CFI violations cause + /// the program to halt. + bool CFIEnforcing; + + /// getCFIFuncName - If this returns a non-empty string, then this is the + /// name of the function that will be called for each CFI violation in + /// non-enforcing mode. + std::string CFIFuncName; + StringRef getCFIFuncName() const; + /// Machine level options. MCTargetOptions MCOptions; };
Index: lib/Analysis/JumpInstrTableInfo.cpp =================================================================== --- lib/Analysis/JumpInstrTableInfo.cpp +++ lib/Analysis/JumpInstrTableInfo.cpp @@ -17,6 +17,7 @@ #include "llvm/Analysis/Passes.h" #include "llvm/IR/Function.h" #include "llvm/IR/Type.h" +#include "llvm/Support/MathExtras.h" using namespace llvm; @@ -28,7 +29,21 @@ return new JumpInstrTableInfo(); } -JumpInstrTableInfo::JumpInstrTableInfo() : ImmutablePass(ID), Tables() { +ModulePass *llvm::createJumpInstrTableInfoPass(unsigned Bound) { + // This cast is always safe, since Bound is always in a subset of uint64_t. + uint64_t B = static_cast<uint64_t>(Bound); + return new JumpInstrTableInfo(B); +} + +JumpInstrTableInfo::JumpInstrTableInfo(uint64_t ByteAlign) + : ImmutablePass(ID), Tables(), ByteAlignment(ByteAlign) { + if (!llvm::isPowerOf2_64(ByteAlign)) { + // Note that we don't explicitly handle overflow here, since we handle the 0 + // case explicitly when a caller actually tries to create jumptable entries, + // and this is the return value on overflow. + ByteAlignment = llvm::NextPowerOf2(ByteAlign); + } + initializeJumpInstrTableInfoPass(*PassRegistry::getPassRegistry()); }
Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp =================================================================== --- lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -881,16 +881,17 @@ bool IsThumb = (Arch == Triple::thumb || Arch == Triple::thumbeb); MCInst TrapInst; TM.getSubtargetImpl()->getInstrInfo()->getTrap(TrapInst); + unsigned LogAlignment = llvm::Log2_64(JITI->entryByteAlignment()); + + // Emit the right section for these functions.
+ OutStreamer.SwitchSection(OutContext.getObjectFileInfo()->getTextSection()); for (const auto &KV : JITI->getTables()) { uint64_t Count = 0; for (const auto &FunPair : KV.second) { // Emit the function labels to make this be a function entry point. MCSymbol *FunSym = OutContext.GetOrCreateSymbol(FunPair.second->getName()); - OutStreamer.EmitSymbolAttribute(FunSym, MCSA_Global); - // FIXME: JumpTableInstrInfo should store information about the required - // alignment of table entries and the size of the padding instruction. - EmitAlignment(3); + EmitAlignment(LogAlignment); if (IsThumb) OutStreamer.EmitThumbFunc(FunSym); if (MAI->hasDotTypeDotSizeDirective()) @@ -912,10 +913,9 @@ } // Emit enough padding instructions to fill up to the next power of two. - // This assumes that the trap instruction takes 8 bytes or fewer. uint64_t Remaining = NextPowerOf2(Count) - Count; for (uint64_t C = 0; C < Remaining; ++C) { - EmitAlignment(3); + EmitAlignment(LogAlignment); OutStreamer.EmitInstruction(TrapInst, getSubtargetInfo()); } Index: lib/CodeGen/CMakeLists.txt =================================================================== --- lib/CodeGen/CMakeLists.txt +++ lib/CodeGen/CMakeLists.txt @@ -19,6 +19,7 @@ ExecutionDepsFix.cpp ExpandISelPseudos.cpp ExpandPostRAPseudos.cpp + ForwardControlFlowIntegrity.cpp GCMetadata.cpp GCMetadataPrinter.cpp GCStrategy.cpp Index: lib/CodeGen/ForwardControlFlowIntegrity.cpp =================================================================== --- /dev/null +++ lib/CodeGen/ForwardControlFlowIntegrity.cpp @@ -0,0 +1,393 @@ +//===-- ForwardControlFlowIntegrity.cpp: Forward-Edge CFI -----------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// \brief A pass that instruments code with fast checks for indirect calls and +/// hooks for a function to check violations. 
+/// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "cfi" + +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Analysis/JumpInstrTableInfo.h" +#include "llvm/CodeGen/ForwardControlFlowIntegrity.h" +#include "llvm/CodeGen/JumpInstrTables.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/CallSite.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Operator.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/Verifier.h" +#include "llvm/Pass.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +#include +#include + +using namespace llvm; + +STATISTIC(NumCFIIndirectCalls, + "Number of indirect call sites rewritten by the CFI pass"); + +char ForwardControlFlowIntegrity::ID = 0; +INITIALIZE_PASS_BEGIN(ForwardControlFlowIntegrity, "forward-cfi", + "Control-Flow Integrity", true, true) +INITIALIZE_PASS_DEPENDENCY(JumpInstrTableInfo); +INITIALIZE_PASS_DEPENDENCY(JumpInstrTables); +INITIALIZE_PASS_END(ForwardControlFlowIntegrity, "forward-cfi", + "Control-Flow Integrity", true, true) + +ModulePass *llvm::createForwardControlFlowIntegrityPass() { + return new ForwardControlFlowIntegrity(); +} + +ModulePass *llvm::createForwardControlFlowIntegrityPass( + JumpTable::JumpTableType JTT, CFIntegrity CFIType, bool CFIEnforcing, + StringRef CFIFuncName) { + return new ForwardControlFlowIntegrity(JTT, CFIType, CFIEnforcing, + CFIFuncName); +} + +// Checks to see if a given CallSite is making an indirect call, including +// cases where the indirect call is made through a bitcast. +static bool isIndirectCall(CallSite &CS) { + if (CS.getCalledFunction()) + return false; + + // Check the value to see if it is merely a bitcast of a function. In + // this case, it will translate to a direct function call in the resulting + // assembly, so we won't treat it as an indirect call here. 
+ const Value *V = CS.getCalledValue(); + if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { + return !(CE->isCast() && isa<Function>(CE->getOperand(0))); + } + + // Otherwise, since we know it's a call, it must be an indirect call + return true; +} + +static const char cfi_failure_func_name[] = "__llvm_cfi_pointer_warning"; +static const char cfi_func_name_prefix[] = "__llvm_cfi_function_"; + +ForwardControlFlowIntegrity::ForwardControlFlowIntegrity() + : ModulePass(ID), IndirectCalls(), JTType(JumpTable::Single), + CFIType(CFIntegrity::Sub), CFIEnforcing(false), CFIFuncName("") { + initializeForwardControlFlowIntegrityPass(*PassRegistry::getPassRegistry()); +} + +ForwardControlFlowIntegrity::ForwardControlFlowIntegrity( + JumpTable::JumpTableType JTT, CFIntegrity CFIType, bool CFIEnforcing, + std::string CFIFuncName) + : ModulePass(ID), IndirectCalls(), JTType(JTT), CFIType(CFIType), + CFIEnforcing(CFIEnforcing), CFIFuncName(CFIFuncName) { + initializeForwardControlFlowIntegrityPass(*PassRegistry::getPassRegistry()); +} + +ForwardControlFlowIntegrity::~ForwardControlFlowIntegrity() {} + +void ForwardControlFlowIntegrity::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<JumpInstrTableInfo>(); + AU.addRequired<JumpInstrTables>(); +} + +void ForwardControlFlowIntegrity::getIndirectCalls(Module &M) { + // To get the indirect calls, we iterate over all functions and iterate over + // the list of basic blocks in each. We extract a total list of indirect calls + // before modifying any of them, since our modifications will modify the list + // of basic blocks. + for (Function &F : M) { + for (BasicBlock &BB : F) { + for (Instruction &I : BB) { + CallSite CS(&I); + if (!((CS.isCall() || CS.isInvoke()) && isIndirectCall(CS))) + continue; + + Value *CalledValue = CS.getCalledValue(); + + // Don't rewrite this instruction if the indirect call is actually just + // inline assembly, since our transformation will generate an invalid + // module in that case. + if (isa<InlineAsm>(CalledValue)) + continue; + + IndirectCalls.push_back(&I); + } + } + } +} + +void ForwardControlFlowIntegrity::updateIndirectCalls(Module &M, + CFITables &CFIT) { + Type *Int64Ty = Type::getInt64Ty(M.getContext()); + for (Instruction *I : IndirectCalls) { + CallSite CS(I); + Value *CalledValue = CS.getCalledValue(); + + // Get the function type for this call and look it up in the tables. + Type *VTy = CalledValue->getType(); + PointerType *PTy = dyn_cast<PointerType>(VTy); + Type *EltTy = PTy->getElementType(); + FunctionType *FunTy = dyn_cast<FunctionType>(EltTy); + FunctionType *TransformedTy = JumpInstrTables::transformType(JTType, FunTy); + ++NumCFIIndirectCalls; + Constant *JumpTableStart = nullptr; + Constant *JumpTableMask = nullptr; + Constant *JumpTableSize = nullptr; + + // Some call sites have function types that don't correspond to any + // address-taken function in the module. This happens when function pointers + // are passed in from external code. + auto it = CFIT.find(TransformedTy); + if (it == CFIT.end()) { + // In this case, make sure that the function pointer will change by + // setting the mask and the start to be 0 so that the transformed + // function is 0.
+ JumpTableStart = ConstantInt::get(Int64Ty, 0); + JumpTableMask = ConstantInt::get(Int64Ty, 0); + JumpTableSize = ConstantInt::get(Int64Ty, 0); + } else { + JumpTableStart = it->second.StartValue; + JumpTableMask = it->second.MaskValue; + JumpTableSize = it->second.Size; + } + + rewriteFunctionPointer(M, I, CalledValue, JumpTableStart, JumpTableMask, + JumpTableSize); + } + + return; +} + +bool ForwardControlFlowIntegrity::runOnModule(Module &M) { + JumpInstrTableInfo *JITI = &getAnalysis<JumpInstrTableInfo>(); + Type *Int64Ty = Type::getInt64Ty(M.getContext()); + Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext()); + + // JumpInstrTableInfo stores information about the alignment of each entry. + // The alignment returned by JumpInstrTableInfo is alignment in bytes, not + // in the exponent. + ByteAlignment = JITI->entryByteAlignment(); + LogByteAlignment = llvm::Log2_64(ByteAlignment); + + // Set up tables for control-flow integrity based on information about the + // jump-instruction tables. + CFITables CFIT; + for (const auto &KV : JITI->getTables()) { + uint64_t Size = static_cast<uint64_t>(KV.second.size()); + uint64_t TableSize = NextPowerOf2(Size); + + int64_t MaskValue = ((TableSize << LogByteAlignment) - 1) & -ByteAlignment; + Constant *JumpTableMaskValue = ConstantInt::get(Int64Ty, MaskValue); + Constant *JumpTableSize = ConstantInt::get(Int64Ty, Size); + + // The base of the table is defined to be the first jumptable function in + // the table. + Function *First = KV.second.begin()->second; + Constant *JumpTableStartValue = ConstantExpr::getBitCast(First, VoidPtrTy); + CFIT[KV.first].StartValue = JumpTableStartValue; + CFIT[KV.first].MaskValue = JumpTableMaskValue; + CFIT[KV.first].Size = JumpTableSize; + } + + if (CFIT.empty()) + return false; + + getIndirectCalls(M); + + if (!CFIEnforcing) { + addWarningFunction(M); + } + + // Update the instructions with the check and the indirect jump through our + // table. + updateIndirectCalls(M, CFIT); + + return true; +} + +void ForwardControlFlowIntegrity::addWarningFunction(Module &M) { + PointerType *CharPtrTy = Type::getInt8PtrTy(M.getContext()); + + // Get the type of the Warning Function: void (i8*, i8*), + // where the first argument is the name of the function in which the violation + // occurs, and the second is the function pointer that violates CFI. + SmallVector<Type *, 2> WarningFunArgs; + WarningFunArgs.push_back(CharPtrTy); + WarningFunArgs.push_back(CharPtrTy); + FunctionType *WarningFunTy = + FunctionType::get(Type::getVoidTy(M.getContext()), WarningFunArgs, false); + + if (!CFIFuncName.empty()) { + Constant *FailureFun = M.getOrInsertFunction(CFIFuncName, WarningFunTy); + if (!FailureFun) + llvm_unreachable("Could not get or insert the function specified by" + " -cfi-func-name"); + } else { + // The default warning function swallows the warning and lets the call + // continue, since there's no generic way for it to print out this + // information.
+ Function *WarningFun = M.getFunction(cfi_failure_func_name); + if (!WarningFun) { + WarningFun = + Function::Create(WarningFunTy, GlobalValue::LinkOnceAnyLinkage, + cfi_failure_func_name, &M); + } + + BasicBlock *Entry = + BasicBlock::Create(M.getContext(), "entry", WarningFun, 0); + ReturnInst::Create(M.getContext(), Entry); + } +} + +void ForwardControlFlowIntegrity::rewriteFunctionPointer( + Module &M, Instruction *I, Value *FunPtr, Constant *JumpTableStart, + Constant *JumpTableMask, Constant *JumpTableSize) { + IRBuilder<> TempBuilder(I); + + Type *OrigFunType = FunPtr->getType(); + + BasicBlock *CurBB = cast<BasicBlock>(I->getParent()); + Function *CurF = cast<Function>(CurBB->getParent()); + Type *Int64Ty = Type::getInt64Ty(M.getContext()); + + Value *TI = TempBuilder.CreatePtrToInt(FunPtr, Int64Ty); + Value *TStartInt = TempBuilder.CreatePtrToInt(JumpTableStart, Int64Ty); + + Value *NewFunPtr = nullptr; + Value *Check = nullptr; + switch (CFIType) { + case CFIntegrity::Sub: { + // This is the subtract, mask, and add version. + // Subtract from the base. + Value *Sub = TempBuilder.CreateSub(TI, TStartInt); + + // Mask the difference to force this to be a table offset. + Value *And = TempBuilder.CreateAnd(Sub, JumpTableMask); + + // Add it back to the base. + Value *Result = TempBuilder.CreateAdd(And, TStartInt); + + // Convert it back into a function pointer that we can call. + NewFunPtr = TempBuilder.CreateIntToPtr(Result, OrigFunType); + break; + } + case CFIntegrity::Ror: { + // This is the subtract and rotate version. + // Rotate right by the alignment value. The optimizer should recognize + // this sequence as a rotation. + + // This cast is safe, since unsigned is always a subset of uint64_t. + uint64_t LogByteAlignment64 = static_cast<uint64_t>(LogByteAlignment); + Constant *RightShift = ConstantInt::get(Int64Ty, LogByteAlignment64); + Constant *LeftShift = ConstantInt::get(Int64Ty, 64 - LogByteAlignment64); + + // Subtract from the base. + Value *Sub = TempBuilder.CreateSub(TI, TStartInt); + + // Create the equivalent of a rotate-right instruction. + Value *Shr = TempBuilder.CreateLShr(Sub, RightShift); + Value *Shl = TempBuilder.CreateShl(Sub, LeftShift); + Value *Or = TempBuilder.CreateOr(Shr, Shl); + + // Perform unsigned comparison to check for inclusion in the table. + Check = TempBuilder.CreateICmpULT(Or, JumpTableSize); + NewFunPtr = FunPtr; + break; + } + case CFIntegrity::Add: { + // This is the mask and add version. + // Mask the function pointer to turn it into an offset into the table. + Value *And = TempBuilder.CreateAnd(TI, JumpTableMask); + + // Then add this offset to the base to get the pointer value. + Value *Result = TempBuilder.CreateAdd(And, TStartInt); + + // Convert it back into a function pointer that we can call. + NewFunPtr = TempBuilder.CreateIntToPtr(Result, OrigFunType); + break; + } + } + + if (!CFIEnforcing) { + // If a check hasn't been added (in the rotation version), then check to see + // if it's the same as the original function. This check determines whether + // or not we call the CFI failure function. + if (!Check) + Check = TempBuilder.CreateICmpEQ(NewFunPtr, FunPtr); + BasicBlock *InvalidPtrBlock = + BasicBlock::Create(M.getContext(), "invalid.ptr", CurF, 0); + BasicBlock *ContinuationBB = CurBB->splitBasicBlock(I); + + // Remove the unconditional branch that connects the two blocks. + TerminatorInst *TermInst = CurBB->getTerminator(); + TermInst->eraseFromParent(); + + // Add a conditional branch that depends on the Check above.
+ BranchInst::Create(ContinuationBB, InvalidPtrBlock, Check, CurBB); + + // Call the warning function for this pointer, then continue. + Instruction *BI = BranchInst::Create(ContinuationBB, InvalidPtrBlock); + insertWarning(M, InvalidPtrBlock, BI, FunPtr); + } else { + // Modify the instruction to call this value. + CallSite CS(I); + CS.setCalledFunction(NewFunPtr); + } +} + +void ForwardControlFlowIntegrity::insertWarning(Module &M, BasicBlock *Block, + Instruction *I, Value *FunPtr) { + Function *ParentFun = cast<Function>(Block->getParent()); + + // Get the function to call right before the instruction. + Function *WarningFun = nullptr; + if (CFIFuncName.empty()) { + WarningFun = M.getFunction(cfi_failure_func_name); + } else { + WarningFun = M.getFunction(CFIFuncName); + } + + assert(WarningFun && "Could not find the CFI failure function"); + + // Look up or create a GlobalVariable + // __llvm_cfi_function_ParentName containing this name. + StringRef ParentName(ParentFun->getName()); + std::string GVName = + Twine(cfi_func_name_prefix).concat(ParentFun->getName()).str(); + GlobalVariable *ParentNameGV = M.getNamedGlobal(GVName); + if (!ParentNameGV) { + Type *CharTy = Type::getInt8Ty(M.getContext()); + ArrayType *ParentNameStringTy = + ArrayType::get(CharTy, ParentName.size() + 1); + + ParentNameGV = new GlobalVariable(M, ParentNameStringTy, true, + GlobalValue::PrivateLinkage, 0, ".str"); + Constant *ParentNameStrConst = + ConstantDataArray::getString(M.getContext(), ParentName, true); + ParentNameGV->setInitializer(ParentNameStrConst); + } + + Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext()); + + IRBuilder<> WarningInserter(I); + Value *ParentNamePtr = WarningInserter.CreateBitCast(ParentNameGV, VoidPtrTy); + Value *FunVoidPtr = WarningInserter.CreateBitCast(FunPtr, VoidPtrTy); + WarningInserter.CreateCall2(WarningFun, ParentNamePtr, FunVoidPtr); +}
Index: lib/CodeGen/JumpInstrTables.cpp =================================================================== --- lib/CodeGen/JumpInstrTables.cpp +++ lib/CodeGen/JumpInstrTables.cpp @@ -163,7 +163,7 @@ Function *JumpInstrTables::insertEntry(Module &M, Function *Target) { FunctionType *OrigFunTy = Target->getFunctionType(); - FunctionType *FunTy = transformType(OrigFunTy); + FunctionType *FunTy = transformType(JTType, OrigFunTy); JumpMap::iterator it = Metadata.find(FunTy); if (Metadata.end() == it) { @@ -191,11 +191,12 @@ } bool JumpInstrTables::hasTable(FunctionType *FunTy) { - FunctionType *TransTy = transformType(FunTy); + FunctionType *TransTy = transformType(JTType, FunTy); return Metadata.end() != Metadata.find(TransTy); } -FunctionType *JumpInstrTables::transformType(FunctionType *FunTy) { +FunctionType *JumpInstrTables::transformType(JumpTable::JumpTableType JTT, + FunctionType *FunTy) { // Returning nullptr forces all types into the same table, since all types map // to the same type Type *VoidPtrTy = Type::getInt8PtrTy(FunTy->getContext()); @@ -211,7 +212,7 @@ Type *Int32Ty = Type::getInt32Ty(FunTy->getContext()); FunctionType *VoidFnTy = FunctionType::get( Type::getVoidTy(FunTy->getContext()), EmptyParams, false); - switch (JTType) { + switch (JTT) { case JumpTable::Single: return FunctionType::get(RetTy, EmptyParams, false); @@ -253,10 +254,10 @@ bool JumpInstrTables::runOnModule(Module &M) { JITI = &getAnalysis<JumpInstrTableInfo>(); - // Get the set of jumptable-annotated functions. + // Get the set of jumptable-annotated functions that have their address taken.
DenseMap<Function *, Function *> Functions; for (Function &F : M) { - if (F.hasFnAttribute(Attribute::JumpTable)) { + if (F.hasFnAttribute(Attribute::JumpTable) && F.hasAddressTaken()) { assert(F.hasUnnamedAddr() && "Attribute 'jumptable' requires 'unnamed_addr'"); Functions[&F] = nullptr;
Index: lib/CodeGen/LLVMTargetMachine.cpp =================================================================== --- lib/CodeGen/LLVMTargetMachine.cpp +++ lib/CodeGen/LLVMTargetMachine.cpp @@ -13,8 +13,10 @@ #include "llvm/Target/TargetMachine.h" +#include "llvm/Analysis/JumpInstrTableInfo.h" #include "llvm/Analysis/Passes.h" #include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/CodeGen/ForwardControlFlowIntegrity.h" #include "llvm/CodeGen/JumpInstrTables.h" #include "llvm/CodeGen/MachineFunctionAnalysis.h" #include "llvm/CodeGen/MachineModuleInfo.h" @@ -143,8 +145,13 @@ AnalysisID StopAfter) { // Passes to handle jumptable function annotations. These can't be handled at // JIT time, so we don't add them directly to addPassesToGenerateCode. - PM.add(createJumpInstrTableInfoPass()); + PM.add(createJumpInstrTableInfoPass( + getSubtargetImpl()->getInstrInfo()->getJumpInstrTableEntryBound())); PM.add(createJumpInstrTablesPass(Options.JTType)); + if (Options.FCFI) + PM.add(createForwardControlFlowIntegrityPass( + Options.JTType, Options.CFIType, Options.CFIEnforcing, + Options.getCFIFuncName())); // Add common CodeGen passes. MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify,
Index: lib/CodeGen/TargetOptionsImpl.cpp =================================================================== --- lib/CodeGen/TargetOptionsImpl.cpp +++ lib/CodeGen/TargetOptionsImpl.cpp @@ -51,3 +51,10 @@ StringRef TargetOptions::getTrapFunctionName() const { return TrapFuncName; } + +/// getCFIFuncName - If this returns a non-empty string, then it is the name of +/// the function that gets called on CFI violations in CFI non-enforcing mode +/// (!TargetOptions::CFIEnforcing). +StringRef TargetOptions::getCFIFuncName() const { + return CFIFuncName; +}
Index: lib/Target/ARM/ARMBaseInstrInfo.h =================================================================== --- lib/Target/ARM/ARMBaseInstrInfo.h +++ lib/Target/ARM/ARMBaseInstrInfo.h @@ -289,12 +289,6 @@ void breakPartialRegDependency(MachineBasicBlock::iterator, unsigned, const TargetRegisterInfo *TRI) const override; - void - getUnconditionalBranch(MCInst &Branch, - const MCSymbolRefExpr *BranchTarget) const override; - - void getTrap(MCInst &MI) const override; - /// Get the number of addresses used by LDM or VLDM or zero for unknown.
unsigned getNumLDMAddresses(const MachineInstr *MI) const;
Index: lib/Target/ARM/ARMBaseInstrInfo.cpp =================================================================== --- lib/Target/ARM/ARMBaseInstrInfo.cpp +++ lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -4489,29 +4489,6 @@ MI->addRegisterKilled(DReg, TRI, true); } -void ARMBaseInstrInfo::getUnconditionalBranch( - MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const { - if (Subtarget.isThumb()) - Branch.setOpcode(ARM::tB); - else if (Subtarget.isThumb2()) - Branch.setOpcode(ARM::t2B); - else - Branch.setOpcode(ARM::Bcc); - - Branch.addOperand(MCOperand::CreateExpr(BranchTarget)); - Branch.addOperand(MCOperand::CreateImm(ARMCC::AL)); - Branch.addOperand(MCOperand::CreateReg(0)); -} - -void ARMBaseInstrInfo::getTrap(MCInst &MI) const { - if (Subtarget.isThumb()) - MI.setOpcode(ARM::tTRAP); - else if (Subtarget.useNaClTrap()) - MI.setOpcode(ARM::TRAPNaCl); - else - MI.setOpcode(ARM::TRAP); -} - bool ARMBaseInstrInfo::hasNOP() const { return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0; }
Index: lib/Target/X86/X86InstrInfo.h =================================================================== --- lib/Target/X86/X86InstrInfo.h +++ lib/Target/X86/X86InstrInfo.h @@ -413,6 +413,8 @@ void getTrap(MCInst &MI) const override; + unsigned getJumpInstrTableEntryBound() const override; + bool isHighLatencyDef(int opc) const override; bool hasHighOperandLatency(const InstrItineraryData *ItinData,
Index: lib/Target/X86/X86InstrInfo.cpp =================================================================== --- lib/Target/X86/X86InstrInfo.cpp +++ lib/Target/X86/X86InstrInfo.cpp @@ -5416,16 +5416,32 @@ NopInst.setOpcode(X86::NOOP); } +// This code must remain in sync with getJumpInstrTableEntryBound in this class! +// In particular, getJumpInstrTableEntryBound must always return an upper bound +// on the encoding lengths of the instructions generated by +// getUnconditionalBranch and getTrap. void X86InstrInfo::getUnconditionalBranch( MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const { Branch.setOpcode(X86::JMP_4); Branch.addOperand(MCOperand::CreateExpr(BranchTarget)); } +// This code must remain in sync with getJumpInstrTableEntryBound in this class! +// In particular, getJumpInstrTableEntryBound must always return an upper bound +// on the encoding lengths of the instructions generated by +// getUnconditionalBranch and getTrap. void X86InstrInfo::getTrap(MCInst &MI) const { MI.setOpcode(X86::TRAP); } +// See getTrap and getUnconditionalBranch for conditions on the value returned +// by this function. +unsigned X86InstrInfo::getJumpInstrTableEntryBound() const { + // 5 bytes suffice: JMP_4 Symbol@PLT uses 1 byte (E9) for the JMP_4 and 4 + // bytes for the symbol offset. And TRAP is ud2, which is two bytes (0F 0B).
+ return 5; +} + bool X86InstrInfo::isHighLatencyDef(int opc) const { switch (opc) { default: return false; Index: test/CodeGen/ARM/jump_tables.ll =================================================================== --- test/CodeGen/ARM/jump_tables.ll +++ /dev/null @@ -1,32 +0,0 @@ -; RUN: llc <%s -mtriple=arm-unknown-linux-gnueabi -jump-table-type=single | FileCheck --check-prefix=ARM %s -; RUN: llc <%s -mtriple=thumb-unknown-linux-gnueabi -jump-table-type=single | FileCheck --check-prefix=THUMB %s - -define void @indirect_fun() unnamed_addr jumptable { - ret void -} -define void ()* @get_fun() { - ret void ()* @indirect_fun - -; ARM: ldr r0, [[LABEL:.*]] -; ARM: mov pc, lr -; ARM: [[LABEL]]: -; ARM: .long __llvm_jump_instr_table_0_1 - -; THUMB: ldr r0, [[LABEL:.*]] -; THUMB: bx lr -; THUMB: [[LABEL]]: -; THUMB: .long __llvm_jump_instr_table_0_1 -} - -; ARM: .globl __llvm_jump_instr_table_0_1 -; ARM: .align 3 -; ARM: .type __llvm_jump_instr_table_0_1,%function -; ARM: __llvm_jump_instr_table_0_1: -; ARM: b indirect_fun(PLT) - -; THUMB: .globl __llvm_jump_instr_table_0_1 -; THUMB: .align 3 -; THUMB: .thumb_func -; THUMB: .type __llvm_jump_instr_table_0_1,%function -; THUMB: __llvm_jump_instr_table_0_1: -; THUMB: b indirect_fun(PLT) Index: test/CodeGen/X86/cfi_enforcing.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/cfi_enforcing.ll @@ -0,0 +1,31 @@ +; RUN: llc -fcfi -cfi-enforcing <%s | FileCheck %s + +target triple = "x86_64-unknown-linux-gnu" +define void @indirect_fun() unnamed_addr jumptable { + ret void +} + +define i32 @m(void ()* %fun) { + call void ()* %fun() +; CHECK: subl +; CHECK: andq $8, +; CHECK: leaq __llvm_jump_instr_table_0_1(%r +; CHECK-NOT: callq __llvm_cfi_pointer_warning +; CHECK: callq *%r + ret i32 0 +} + +define void ()* @get_fun() { + ret void ()* @indirect_fun +} + +define i32 @main(i32 %argc, i8** %argv) { + %f = call void ()* ()* @get_fun() + %a = call i32 @m(void ()* %f) + ret i32 %a +} +; XFAIL: win32 + +; CHECK: .align 8 +; CHECK: __llvm_jump_instr_table_0_1: +; CHECK: jmp indirect_fun@PLT Index: test/CodeGen/X86/cfi_invoke.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/cfi_invoke.ll @@ -0,0 +1,34 @@ +; RUN: llc <%s -fcfi -cfi-type=sub | FileCheck %s +; ModuleID = 'test.cc' +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +declare i32 @__gxx_personality_v0(...) 
+ +@_ZTIPKc = external constant i8* +@_ZTIi = external constant i8* + +define void @f() unnamed_addr jumptable { + ret void +} + +@a = global void ()* @f + +; Make sure invoke gets targeted as well as regular calls +define void @_Z3foov(void ()* %f) uwtable ssp { +; CHECK-LABEL: _Z3foov: + entry: + invoke void %f() + to label %try.cont unwind label %lpad +; CHECK: callq __llvm_cfi_pointer_warning +; CHECK: callq *%rbx + + lpad: + %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + catch i8* bitcast (i8** @_ZTIi to i8*) + filter [1 x i8*] [i8* bitcast (i8** @_ZTIPKc to i8*)] + ret void + + try.cont: + ret void +} \ No newline at end of file Index: test/CodeGen/X86/cfi_non_default_function.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/cfi_non_default_function.ll @@ -0,0 +1,28 @@ +; RUN: llc -fcfi -cfi-func-name=cfi_new_failure <%s | FileCheck %s + +target triple = "x86_64-unknown-linux-gnu" +define void @indirect_fun() unnamed_addr jumptable { + ret void +} + +define i32 @m(void ()* %fun) { +; CHECK-LABEL: @m + call void ()* %fun() +; CHECK: callq cfi_new_failure + ret i32 0 +} + +define void ()* @get_fun() { + ret void ()* @indirect_fun +} + +define i32 @main(i32 %argc, i8** %argv) { + %f = call void ()* ()* @get_fun() + %a = call i32 @m(void ()* %f) + ret i32 %a +} +; XFAIL: win32 + +; CHECK: .align 8 +; CHECK: __llvm_jump_instr_table_0_1: +; CHECK: jmp indirect_fun@PLT Index: test/CodeGen/X86/cfi_simple_indirect_call.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/cfi_simple_indirect_call.ll @@ -0,0 +1,44 @@ +; RUN: llc -fcfi -cfi-type=sub <%s | FileCheck --check-prefix=SUB %s +; RUN: llc -fcfi -cfi-type=add <%s | FileCheck --check-prefix=ADD %s +; RUN: llc -fcfi -cfi-type=ror <%s | FileCheck --check-prefix=ROR %s + +target triple = "x86_64-unknown-linux-gnu" + +define void @indirect_fun() unnamed_addr jumptable { + ret void +} + +define i32 @m(void ()* %fun) { + call void ()* %fun() +; SUB: subl +; SUB: andq $8 +; SUB-LABEL: leaq __llvm_jump_instr_table_0_1 +; SUB-LABEL: callq __llvm_cfi_pointer_warning + +; ROR: subq +; ROR: rolq $61 +; ROR: testq +; ROR-LABEL: callq __llvm_cfi_pointer_warning + +; ADD: andq $8 +; ADD-LABEL: leaq __llvm_jump_instr_table_0_1 +; ADD: cmpq +; ADD-LABEL: callq __llvm_cfi_pointer_warning +ret i32 0 +} + +define void ()* @get_fun() { + ret void ()* @indirect_fun +} + +define i32 @main(i32 %argc, i8** %argv) { + %f = call void ()* ()* @get_fun() + %a = call i32 @m(void ()* %f) + ret i32 %a +} +; XFAIL: win32 +; SUB: .text +; SUB: .align 8 +; SUB-LABEL: .type __llvm_jump_instr_table_0_1,@function +; SUB-LABEL:__llvm_jump_instr_table_0_1: +; SUB-LABEL: jmp indirect_fun@PLT Index: test/CodeGen/X86/jump_table_alias.ll =================================================================== --- test/CodeGen/X86/jump_table_alias.ll +++ test/CodeGen/X86/jump_table_alias.ll @@ -25,7 +25,6 @@ ; There should only be one table, even though there are two GlobalAliases, ; because they both alias the same value. 
-; CHECK: .globl __llvm_jump_instr_table_0_1 ; CHECK: .align 8, 0x90 ; CHECK: .type __llvm_jump_instr_table_0_1,@function ; CHECK: __llvm_jump_instr_table_0_1: Index: test/CodeGen/X86/jump_table_align.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/jump_table_align.ll @@ -0,0 +1,29 @@ +; RUN: llc -filetype=obj <%s -jump-table-type=single -o %t1 +; RUN: llvm-objdump -triple=x86_64-unknown-linux-gnu -d %t1 | FileCheck %s +target triple = "x86_64-unknown-linux-gnu" +define i32 @f() unnamed_addr jumptable { + ret i32 0 +} + +define i32 @g(i8* %a) unnamed_addr jumptable { + ret i32 0 +} + +define void @h(void ()* %func) unnamed_addr jumptable { + ret void +} + +define i32 @main() { + %g = alloca i32 (...)*, align 8 + store i32 (...)* bitcast (i32 ()* @f to i32 (...)*), i32 (...)** %g, align 8 + %1 = load i32 (...)** %g, align 8 + %call = call i32 (...)* %1() + call void (void ()*)* @h(void ()* bitcast (void (void ()*)* @h to void ()*)) + %a = call i32 (i32*)* bitcast (i32 (i8*)* @g to i32(i32*)*)(i32* null) + ret i32 %a +} + +; Make sure that the padding from getJumpInstrTableEntryBound is right. +; CHECK: __llvm_jump_instr_table_0_1: +; CHECK-NEXT: e9 00 00 00 00 jmp 0 +; CHECK-NEXT: 0f 1f 00 nopl (%rax) Index: test/CodeGen/X86/jump_table_bitcast.ll =================================================================== --- test/CodeGen/X86/jump_table_bitcast.ll +++ test/CodeGen/X86/jump_table_bitcast.ll @@ -15,12 +15,12 @@ define i32 @main() { %g = alloca i32 (...)*, align 8 store i32 (...)* bitcast (i32 ()* @f to i32 (...)*), i32 (...)** %g, align 8 -; CHECK: movq $__llvm_jump_instr_table_0_[[ENTRY:1|2|3]], (%rsp) -; CHECK: movl $__llvm_jump_instr_table_0_[[ENTRY]], %ecx +; CHECK: movq $__llvm_jump_instr_table_0_[[ENTRY:1|2|3]], +; CHECK: movl $__llvm_jump_instr_table_0_[[ENTRY]], %1 = load i32 (...)** %g, align 8 %call = call i32 (...)* %1() call void (void ()*)* @h(void ()* bitcast (void (void ()*)* @h to void ()*)) -; CHECK: movl $__llvm_jump_instr_table_0_{{1|2|3}}, %edi +; CHECK: movl $__llvm_jump_instr_table_0_{{1|2|3}}, ; CHECK: callq h %a = call i32 (i32*)* bitcast (i32 (i8*)* @g to i32(i32*)*)(i32* null) @@ -28,17 +28,14 @@ ret i32 %a } -; CHECK: .globl __llvm_jump_instr_table_0_1 ; CHECK: .align 8, 0x90 ; CHECK: .type __llvm_jump_instr_table_0_1,@function ; CHECK: __llvm_jump_instr_table_0_1: ; CHECK: jmp {{f|g|h}}@PLT -; CHECK: .globl __llvm_jump_instr_table_0_2 ; CHECK: .align 8, 0x90 ; CHECK: .type __llvm_jump_instr_table_0_2,@function ; CHECK: __llvm_jump_instr_table_0_2: ; CHECK: jmp {{f|g|h}}@PLT -; CHECK: .globl __llvm_jump_instr_table_0_3 ; CHECK: .align 8, 0x90 ; CHECK: .type __llvm_jump_instr_table_0_3,@function ; CHECK: __llvm_jump_instr_table_0_3: Index: test/CodeGen/X86/jump_tables.ll =================================================================== --- test/CodeGen/X86/jump_tables.ll +++ test/CodeGen/X86/jump_tables.ll @@ -7,6 +7,20 @@ %struct.fun_struct = type { i32 (...)* } +@a = global [12 x i32 () *] [ i32 ()* bitcast (void ()* @indirect_fun to i32 ()*), + i32 ()* bitcast (void ()* @indirect_fun_match to i32 ()*), + i32 ()* bitcast (i32 ()* @indirect_fun_i32 to i32 ()*), + i32 ()* bitcast (i32 (i32)* @indirect_fun_i32_1 to i32 ()*), + i32 ()* bitcast (i32 (i32, i32)* @indirect_fun_i32_2 to i32 ()*), + i32 ()* bitcast (i32* (i32*, i32)* @indirect_fun_i32S_2 to i32 ()*), + i32 ()* bitcast (void (%struct.fun_struct)* @indirect_fun_struct to i32 ()*), + i32 ()* bitcast (void (i32 (...)*, i32)* @indirect_fun_fun 
to i32 ()*), + i32 ()* bitcast (i32 (i32 (...)*, i32)* @indirect_fun_fun_ret to i32 ()*), + i32 ()* bitcast (void ([19 x i8])* @indirect_fun_array to i32 ()*), + i32 ()* bitcast (void (<3 x i32>)* @indirect_fun_vec to i32 ()*), + i32 ()* bitcast (void (<4 x float>)* @indirect_fun_vec_2 to i32 ()*) + ] + define void @indirect_fun() unnamed_addr jumptable { ret void } @@ -74,62 +88,50 @@ ret i32 %a } -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_1 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_1,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_1: ; SINGLE-DAG: jmp indirect_fun_array@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_2 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_2,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_2: ; SINGLE-DAG: jmp indirect_fun_i32_2@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_3 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_3,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_3: ; SINGLE-DAG: jmp indirect_fun_vec_2@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_4 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_4,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_4: ; SINGLE-DAG: jmp indirect_fun_i32S_2@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_5 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_5,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_5: ; SINGLE-DAG: jmp indirect_fun_struct@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_6 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_6,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_6: ; SINGLE-DAG: jmp indirect_fun_i32_1@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_7 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_7,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_7: ; SINGLE-DAG: jmp indirect_fun_i32@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_8 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_8,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_8: ; SINGLE-DAG: jmp indirect_fun_fun@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_9 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_9,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_9: ; SINGLE-DAG: jmp indirect_fun_fun_ret@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_10 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_10,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_10: ; SINGLE-DAG: jmp indirect_fun@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_11 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_11,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_11: ; SINGLE-DAG: jmp indirect_fun_match@PLT -; SINGLE-DAG: .globl __llvm_jump_instr_table_0_12 ; SINGLE-DAG: .align 8, 0x90 ; SINGLE-DAG: .type __llvm_jump_instr_table_0_12,@function ; SINGLE-DAG: __llvm_jump_instr_table_0_12: @@ -144,82 +146,69 @@ ; SINGLE-DAG: ud2 -; ARITY-DAG: .globl __llvm_jump_instr_table_2_1 ; ARITY-DAG: .align 8, 0x90 ; ARITY-DAG: .type __llvm_jump_instr_table_2_1,@function ; ARITY-DAG: __llvm_jump_instr_table_2_1: ; ARITY-DAG: jmp indirect_fun{{.*}}@PLT ; ARITY-DAG: .align 8, 0x90 ; ARITY-DAG: ud2 -; ARITY-DAG: .globl __llvm_jump_instr_table_0_1 ; ARITY-DAG: .align 8, 0x90 ; ARITY-DAG: .type __llvm_jump_instr_table_0_1,@function ; ARITY-DAG: __llvm_jump_instr_table_0_1: ; ARITY-DAG: jmp indirect_fun{{.*}}@PLT -; ARITY-DAG: .globl 
__llvm_jump_instr_table_1_1 ; ARITY-DAG: .align 8, 0x90 ; ARITY-DAG: .type __llvm_jump_instr_table_1_1,@function ; ARITY-DAG: __llvm_jump_instr_table_1_1: ; ARITY-DAG: jmp indirect_fun{{.*}}@PLT -; SIMPL-DAG: .globl __llvm_jump_instr_table_2_1 ; SIMPL-DAG: .align 8, 0x90 ; SIMPL-DAG: .type __llvm_jump_instr_table_2_1,@function ; SIMPL-DAG: __llvm_jump_instr_table_2_1: ; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT ; SIMPL-DAG: .align 8, 0x90 ; SIMPL-DAG: ud2 -; SIMPL-DAG: .globl __llvm_jump_instr_table_0_1 ; SIMPL-DAG: .align 8, 0x90 ; SIMPL-DAG: .type __llvm_jump_instr_table_0_1,@function ; SIMPL-DAG: __llvm_jump_instr_table_0_1: ; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT -; SIMPL-DAG: .globl __llvm_jump_instr_table_1_1 ; SIMPL-DAG: .align 8, 0x90 ; SIMPL-DAG: .type __llvm_jump_instr_table_1_1,@function ; SIMPL-DAG: __llvm_jump_instr_table_1_1: ; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT -; SIMPL-DAG: .globl __llvm_jump_instr_table_3_1 ; SIMPL-DAG: .align 8, 0x90 ; SIMPL-DAG: .type __llvm_jump_instr_table_3_1,@function ; SIMPL-DAG: __llvm_jump_instr_table_3_1: ; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT -; SIMPL-DAG: .globl __llvm_jump_instr_table_4_1 ; SIMPL-DAG: .align 8, 0x90 ; SIMPL-DAG: .type __llvm_jump_instr_table_4_1,@function ; SIMPL-DAG: __llvm_jump_instr_table_4_1: ; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT -; FULL-DAG: .globl __llvm_jump_instr_table_10_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_10_1,@function ; FULL-DAG:__llvm_jump_instr_table_10_1: ; FULL-DAG: jmp indirect_fun_i32_1@PLT ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_9_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_9_1,@function ; FULL-DAG:__llvm_jump_instr_table_9_1: ; FULL-DAG: jmp indirect_fun_i32_2@PLT ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_7_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_7_1,@function ; FULL-DAG:__llvm_jump_instr_table_7_1: ; FULL-DAG: jmp indirect_fun_i32S_2@PLT ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_3_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_3_1,@function ; FULL-DAG:__llvm_jump_instr_table_3_1: ; FULL-DAG: jmp indirect_fun_vec_2@PLT ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_2_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_2_1,@function ; FULL-DAG:__llvm_jump_instr_table_2_1: @@ -228,42 +217,36 @@ ; FULL-DAG: ud2 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_8_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_8_1,@function ; FULL-DAG:__llvm_jump_instr_table_8_1: ; FULL-DAG: jmp indirect_fun_i32@PLT ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_1_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_1_1,@function ; FULL-DAG:__llvm_jump_instr_table_1_1: ; FULL-DAG: jmp indirect_fun_array@PLT ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_0_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_0_1,@function ; FULL-DAG:__llvm_jump_instr_table_0_1: ; FULL-DAG: jmp indirect_fun_vec@PLT ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_6_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_6_1,@function ; FULL-DAG:__llvm_jump_instr_table_6_1: ; FULL-DAG: jmp indirect_fun_struct@PLT 
; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_5_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_5_1,@function ; FULL-DAG:__llvm_jump_instr_table_5_1: ; FULL-DAG: jmp indirect_fun_fun@PLT ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: ud2 -; FULL-DAG: .globl __llvm_jump_instr_table_4_1 ; FULL-DAG: .align 8, 0x90 ; FULL-DAG: .type __llvm_jump_instr_table_4_1,@function ; FULL-DAG:__llvm_jump_instr_table_4_1: Index: test/LTO/jump-table-type.ll =================================================================== --- test/LTO/jump-table-type.ll +++ test/LTO/jump-table-type.ll @@ -2,8 +2,8 @@ ; RUN: llvm-lto -o %t2 %t1 -jump-table-type=arity ; RUN: llvm-nm %t2 | FileCheck %s -; CHECK: T __llvm_jump_instr_table_0_1 -; CHECK: T __llvm_jump_instr_table_1_1 +; CHECK: t __llvm_jump_instr_table_0_1 +; CHECK: t __llvm_jump_instr_table_1_1 target triple = "x86_64-unknown-linux-gnu"
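
A note on the arithmetic in this patch (not part of the patch itself): on x86-64, getJumpInstrTableEntryBound returns 5, JumpInstrTableInfo rounds that up to the next power of two (8), and ForwardControlFlowIntegrity::runOnModule derives the check mask from the padded table size and that entry alignment. A minimal, self-contained sketch of the same computation, assuming a table with 5 address-taken functions and 8-byte entries:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t ByteAlignment = 8;    // NextPowerOf2(5): the x86-64 entry bound rounded up
  uint64_t LogByteAlignment = 3; // Log2_64(8)
  uint64_t Size = 5;             // functions in this table
  uint64_t TableSize = 8;        // NextPowerOf2(Size): entries after trap padding
  // Same expression as in ForwardControlFlowIntegrity::runOnModule.
  uint64_t MaskValue = ((TableSize << LogByteAlignment) - 1) & -ByteAlignment;
  // 0b111111 & ~0b111 == 0b111000, so the mask keeps offsets 0, 8, ..., 56:
  // exactly the entry starts in the padded 64-byte table.
  assert(MaskValue == 56);
  return 0;
}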
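
A rough, hand-written analogue of what the CFIntegrity::Sub rewrite computes for each indirect call (an illustration of the subtract/mask/add sequence, not the IR the pass actually emits; the helper name clampToTable is made up for this sketch):

#include <cstdint>

using FnPtr = void (*)();

// Any pointer that is not an aligned entry inside the table is forced onto
// some entry of the table; valid jumptable targets pass through unchanged.
FnPtr clampToTable(FnPtr Fun, uintptr_t TableStart, uintptr_t TableMask) {
  uintptr_t P = reinterpret_cast<uintptr_t>(Fun);
  uintptr_t Offset = (P - TableStart) & TableMask; // table-relative, entry-aligned
  return reinterpret_cast<FnPtr>(TableStart + Offset);
}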
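
Finally, a sketch of a handler with the void (i8*, i8*) signature that CommandFlags.h describes for -cfi-func-name; the name my_cfi_handler is hypothetical, and whether to log, abort, or ignore is up to the application:

#include <cstdio>

// First argument: name of the function containing the violating call site.
// Second argument: the function pointer that failed the CFI check.
extern "C" void my_cfi_handler(const char *Caller, void *Target) {
  std::fprintf(stderr, "CFI violation in %s: indirect call to %p\n", Caller, Target);
}

With llc -fcfi -cfi-func-name=my_cfi_handler (non-enforcing mode), the instrumented code calls this function on a failed check and then continues with the original indirect call.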