Index: docs/LangRef.rst =================================================================== --- docs/LangRef.rst +++ docs/LangRef.rst @@ -12172,6 +12172,31 @@ Lowering for ``@llvm.experimental.deoptimize`` is not yet implemented, and is a work in progress. +'``llvm.load.relative``' Intrinsic +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +:: + + declare i8* @llvm.load.relative.iN(i8* %ptr, iN %offset) argmemonly nounwind readonly + +Overview: +""""""""" + +This intrinsic loads a 32-bit value from the address ``%ptr + %offset``, +adds ``%ptr`` to that value and returns it. The constant folder specifically +recognizes the form of this intrinsic and the constant initializers it may +load from; if a loaded constant initializer is known to have the form +``i32 trunc(x - %ptr)``, the intrinsic call is folded to ``x``. + +LLVM provides that the calculation of such a constant initializer will +not overflow at link time under the medium code model if ``x`` is an +``unnamed_addr`` function, however it does not provide the same for a constant +initializer folded into a function body; this intrinsic can be used to avoid +the possibility of overflows when loading from such a constant. + Stack Map Intrinsics -------------------- Index: include/llvm/CodeGen/Passes.h =================================================================== --- include/llvm/CodeGen/Passes.h +++ include/llvm/CodeGen/Passes.h @@ -664,6 +664,11 @@ /// ModulePass *createLowerEmuTLSPass(const TargetMachine *TM); + /// This pass lowers the @llvm.load.relative intrinsic to instructions. + /// This is unsafe to do earlier because a pass may combine the constant + /// initializer into the load, which may result in an overflowing evaluation. + ModulePass *createPreISelIntrinsicLoweringPass(); + /// GlobalMerge - This pass merges internal (by default) globals into structs /// to enable reuse of a base pointer by indexed addressing modes. /// It can also be configured to focus on size optimizations only. 
Index: include/llvm/IR/Intrinsics.td =================================================================== --- include/llvm/IR/Intrinsics.td +++ include/llvm/IR/Intrinsics.td @@ -663,6 +663,9 @@ def int_bitset_test : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty], [IntrNoMem]>; +def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty], + [IntrReadArgMem]>; + //===----------------------------------------------------------------------===// // Target-specific intrinsics //===----------------------------------------------------------------------===// Index: include/llvm/InitializePasses.h =================================================================== --- include/llvm/InitializePasses.h +++ include/llvm/InitializePasses.h @@ -241,6 +241,7 @@ void initializePostOrderFunctionAttrsLegacyPassPass(PassRegistry&); void initializePostRASchedulerPass(PassRegistry&); void initializePostMachineSchedulerPass(PassRegistry&); +void initializePreISelIntrinsicLoweringPass(PassRegistry&); void initializePrintFunctionPassWrapperPass(PassRegistry&); void initializePrintModulePassWrapperPass(PassRegistry&); void initializePrintBasicBlockPassPass(PassRegistry&); Index: lib/Analysis/InlineCost.cpp =================================================================== --- lib/Analysis/InlineCost.cpp +++ lib/Analysis/InlineCost.cpp @@ -861,6 +861,11 @@ default: return Base::visitCallSite(CS); + case Intrinsic::load_relative: + // This is normally lowered to 4 LLVM instructions. 
+ Cost += 3 * InlineConstants::InstrCost; + return false; + case Intrinsic::memset: case Intrinsic::memcpy: case Intrinsic::memmove: Index: lib/Analysis/InstructionSimplify.cpp =================================================================== --- lib/Analysis/InstructionSimplify.cpp +++ lib/Analysis/InstructionSimplify.cpp @@ -3788,6 +3788,61 @@ } } +static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset, + const DataLayout &DL) { + GlobalValue *PtrSym; + APInt PtrOffset; + if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL)) + return nullptr; + + Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext()); + Type *Int32Ty = Type::getInt32Ty(Ptr->getContext()); + Type *Int32PtrTy = Int32Ty->getPointerTo(); + Type *Int64Ty = Type::getInt64Ty(Ptr->getContext()); + + if (!isa<ConstantInt>(Offset)) + return nullptr; + + uint64_t OffsetInt = cast<ConstantInt>(Offset)->getSExtValue(); + if (OffsetInt % 4 != 0) + return nullptr; + + Constant *C = ConstantExpr::getGetElementPtr( + Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy), + ConstantInt::get(Int64Ty, OffsetInt / 4)); + Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL); + if (!Loaded) + return nullptr; + + auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded); + if (!LoadedCE) + return nullptr; + + if (LoadedCE->getOpcode() == Instruction::Trunc) { + LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); + if (!LoadedCE) + return nullptr; + } + + if (LoadedCE->getOpcode() != Instruction::Sub) + return nullptr; + + auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); + if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt) + return nullptr; + auto *LoadedLHSPtr = LoadedLHS->getOperand(0); + + Constant *LoadedRHS = LoadedCE->getOperand(1); + GlobalValue *LoadedRHSSym; + APInt LoadedRHSOffset; + if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset, + DL) || + PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset) + return nullptr; + + return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy); +} + +template <typename IterTy>
static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd, const Query &Q, unsigned MaxRecurse) { @@ -3828,6 +3883,11 @@ if (match(RHS, m_Undef())) return Constant::getNullValue(ReturnType); } + + if (IID == Intrinsic::load_relative && isa<Constant>(LHS) && + isa<Constant>(RHS)) + return SimplifyRelativeLoad(cast<Constant>(LHS), cast<Constant>(RHS), + Q.DL); } // Perform idempotent optimizations Index: lib/CodeGen/CMakeLists.txt =================================================================== --- lib/CodeGen/CMakeLists.txt +++ lib/CodeGen/CMakeLists.txt @@ -85,6 +85,7 @@ PHIElimination.cpp PHIEliminationUtils.cpp PostRASchedulerList.cpp + PreISelIntrinsicLowering.cpp ProcessImplicitDefs.cpp PrologEpilogInserter.cpp PseudoSourceValue.cpp Index: lib/CodeGen/CodeGen.cpp =================================================================== --- lib/CodeGen/CodeGen.cpp +++ lib/CodeGen/CodeGen.cpp @@ -61,6 +61,7 @@ initializePeepholeOptimizerPass(Registry); initializePostMachineSchedulerPass(Registry); initializePostRASchedulerPass(Registry); + initializePreISelIntrinsicLoweringPass(Registry); initializeProcessImplicitDefsPass(Registry); initializeRegisterCoalescerPass(Registry); initializeShrinkWrapPass(Registry); Index: lib/CodeGen/LLVMTargetMachine.cpp =================================================================== --- lib/CodeGen/LLVMTargetMachine.cpp +++ lib/CodeGen/LLVMTargetMachine.cpp @@ -102,6 +102,8 @@ if (TM->Options.EmulatedTLS) PM.add(createLowerEmuTLSPass(TM)); + PM.add(createPreISelIntrinsicLoweringPass()); + // Add internal analysis passes from the target machine.
PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis())); Index: lib/CodeGen/PreISelIntrinsicLowering.cpp =================================================================== --- /dev/null +++ lib/CodeGen/PreISelIntrinsicLowering.cpp @@ -0,0 +1,85 @@ +//===-- PreISelIntrinsicLowering.cpp - Pre-ISel intrinsic lowering pass ---===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass implements IR lowering for the llvm.load.relative intrinsic. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/Passes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/Module.h" +#include "llvm/Pass.h" + +using namespace llvm; + +namespace { + +bool lowerLoadRelative(Function &F) { + if (F.use_empty()) + return false; + + bool Changed = false; + Type *Int32Ty = Type::getInt32Ty(F.getContext()); + Type *Int32PtrTy = Int32Ty->getPointerTo(); + Type *Int8Ty = Type::getInt8Ty(F.getContext()); + + for (auto I = F.use_begin(), E = F.use_end(); I != E;) { + auto CI = dyn_cast<CallInst>(I->getUser()); + ++I; + if (!CI || CI->getCalledValue() != &F) + continue; + + IRBuilder<> B(CI); + Value *OffsetPtr = + B.CreateGEP(Int8Ty, CI->getArgOperand(0), CI->getArgOperand(1)); + Value *OffsetPtrI32 = B.CreateBitCast(OffsetPtr, Int32PtrTy); + Value *OffsetI32 = B.CreateAlignedLoad(OffsetPtrI32, 4); + + Value *ResultPtr = B.CreateGEP(Int8Ty, CI->getArgOperand(0), OffsetI32); + + CI->replaceAllUsesWith(ResultPtr); + CI->eraseFromParent(); + Changed = true; + } + + return Changed; +} + +bool lowerIntrinsics(Module &M) { + bool Changed = false; + for (Function &F : M) { + if (F.getName().startswith("llvm.load.relative.")) + Changed
|= lowerLoadRelative(F); + } + return Changed; +} + +class PreISelIntrinsicLowering : public ModulePass { +public: + static char ID; + PreISelIntrinsicLowering() : ModulePass(ID) {} + + bool runOnModule(Module &M) { + return lowerIntrinsics(M); + } +}; + +char PreISelIntrinsicLowering::ID; + +} + +INITIALIZE_PASS(PreISelIntrinsicLowering, "pre-isel-intrinsic-lowering", + "Pre-ISel Intrinsic Lowering", false, false) + +ModulePass *llvm::createPreISelIntrinsicLoweringPass() { + return new PreISelIntrinsicLowering; +} Index: test/CodeGen/Generic/stop-after.ll =================================================================== --- test/CodeGen/Generic/stop-after.ll +++ test/CodeGen/Generic/stop-after.ll @@ -6,6 +6,6 @@ ; STOP-NEXT: Machine Function Analysis ; STOP-NEXT: MIR Printing Pass -; START: -machine-branch-prob -gc-lowering +; START: -machine-branch-prob -pre-isel-intrinsic-lowering ; START: FunctionPass Manager ; START-NEXT: Lower Garbage Collection Instructions Index: test/Transforms/InstSimplify/load-relative-32.ll =================================================================== --- /dev/null +++ test/Transforms/InstSimplify/load-relative-32.ll @@ -0,0 +1,19 @@ +; RUN: opt < %s -instsimplify -S | FileCheck %s + +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32" +target triple = "i386-unknown-linux-gnu" + +@a = external global i8 + +@c1 = constant [3 x i32] [i32 0, i32 0, +i32 sub (i32 ptrtoint (i8* @a to i32), i32 ptrtoint (i32* getelementptr ([3 x i32], [3 x i32]* @c1, i32 0, i32 2) to i32)) +] + +; CHECK: @f1 +define i8* @f1() { + ; CHECK: ret i8* @a + %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([3 x i32], [3 x i32]* @c1, i32 0, i32 2) to i8*), i32 0) + ret i8* %l +} + +declare i8* @llvm.load.relative.i32(i8*, i32) Index: test/Transforms/InstSimplify/load-relative.ll 
=================================================================== --- /dev/null +++ test/Transforms/InstSimplify/load-relative.ll @@ -0,0 +1,75 @@ +; RUN: opt < %s -instsimplify -S | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@a = external global i8 +@b = external global i8 + +@c1 = constant i32 trunc (i64 sub (i64 ptrtoint (i8* @a to i64), i64 ptrtoint (i32* @c1 to i64)) to i32) +@c2 = constant [7 x i32] [i32 0, i32 0, +i32 trunc (i64 sub (i64 ptrtoint (i8* @a to i64), i64 ptrtoint (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i32 0, i32 2) to i64)) to i32), +i32 trunc (i64 sub (i64 ptrtoint (i8* @b to i64), i64 ptrtoint (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i32 0, i32 2) to i64)) to i32), +i32 trunc (i64 add (i64 ptrtoint (i8* @b to i64), i64 ptrtoint (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i32 0, i32 2) to i64)) to i32), +i32 trunc (i64 sub (i64 ptrtoint (i8* @b to i64), i64 1) to i32), +i32 trunc (i64 sub (i64 0, i64 ptrtoint (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i32 0, i32 2) to i64)) to i32) +] + +; CHECK: @f1 +define i8* @f1() { + ; CHECK: ret i8* @a + %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* @c1 to i8*), i32 0) + ret i8* %l +} + +; CHECK: @f2 +define i8* @f2() { + ; CHECK: ret i8* @a + %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 0) + ret i8* %l +} + +; CHECK: @f3 +define i8* @f3() { + ; CHECK: ret i8* @b + %l = call i8* @llvm.load.relative.i64(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i64 4) + ret i8* %l +} + +; CHECK: @f4 +define i8* @f4() { + ; CHECK: ret i8* % + %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 1) + ret i8* %l +} + +; CHECK: @f5 +define i8* @f5() { + ; CHECK: ret i8* % + %l = call i8* @llvm.load.relative.i32(i8* 
zeroinitializer, i32 0) + ret i8* %l +} + +; CHECK: @f6 +define i8* @f6() { + ; CHECK: ret i8* % + %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 8) + ret i8* %l +} + +; CHECK: @f7 +define i8* @f7() { + ; CHECK: ret i8* % + %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 12) + ret i8* %l +} + +; CHECK: @f8 +define i8* @f8() { + ; CHECK: ret i8* % + %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 16) + ret i8* %l +} + +declare i8* @llvm.load.relative.i32(i8*, i32) +declare i8* @llvm.load.relative.i64(i8*, i64) Index: test/Transforms/PreISelIntrinsicLowering/load-relative.ll =================================================================== --- /dev/null +++ test/Transforms/PreISelIntrinsicLowering/load-relative.ll @@ -0,0 +1,26 @@ +; RUN: opt -pre-isel-intrinsic-lowering -S -o - %s | FileCheck %s + +; CHECK: define i8* @foo32(i8* [[P:%.*]], i32 [[O:%.*]]) +define i8* @foo32(i8* %p, i32 %o) { + ; CHECK: [[OP:%.*]] = getelementptr i8, i8* [[P]], i32 [[O]] + ; CHECK: [[OPI32:%.*]] = bitcast i8* [[OP]] to i32* + ; CHECK: [[OI32:%.*]] = load i32, i32* [[OPI32]], align 4 + ; CHECK: [[R:%.*]] = getelementptr i8, i8* [[P]], i32 [[OI32]] + ; CHECK: ret i8* [[R]] + %l = call i8* @llvm.load.relative.i32(i8* %p, i32 %o) + ret i8* %l +} + +; CHECK: define i8* @foo64(i8* [[P:%.*]], i64 [[O:%.*]]) +define i8* @foo64(i8* %p, i64 %o) { + ; CHECK: [[OP:%.*]] = getelementptr i8, i8* [[P]], i64 [[O]] + ; CHECK: [[OPI32:%.*]] = bitcast i8* [[OP]] to i32* + ; CHECK: [[OI32:%.*]] = load i32, i32* [[OPI32]], align 4 + ; CHECK: [[R:%.*]] = getelementptr i8, i8* [[P]], i32 [[OI32]] + ; CHECK: ret i8* [[R]] + %l = call i8* @llvm.load.relative.i64(i8* %p, i64 %o) + ret i8* %l +} + +declare i8* @llvm.load.relative.i32(i8*, i32) +declare i8* 
@llvm.load.relative.i64(i8*, i64) Index: tools/opt/opt.cpp =================================================================== --- tools/opt/opt.cpp +++ tools/opt/opt.cpp @@ -337,6 +337,7 @@ initializeDwarfEHPreparePass(Registry); initializeSafeStackPass(Registry); initializeSjLjEHPreparePass(Registry); + initializePreISelIntrinsicLoweringPass(Registry); #ifdef LINK_POLLY_INTO_TOOLS polly::initializePollyPasses(Registry);