Index: lib/Target/AMDGPU/AMDGPU.h
===================================================================
--- lib/Target/AMDGPU/AMDGPU.h
+++ lib/Target/AMDGPU/AMDGPU.h
@@ -51,6 +51,7 @@
 FunctionPass *createSIInsertWaitcntsPass();
 FunctionPass *createAMDGPUCodeGenPreparePass();
 FunctionPass *createAMDGPUMachineCFGStructurizerPass();
+FunctionPass *createAMDGPURewriteOutArgumentsPass();
 
 void initializeAMDGPUMachineCFGStructurizerPass(PassRegistry&);
 extern char &AMDGPUMachineCFGStructurizerID;
@@ -65,6 +66,9 @@
 void initializeAMDGPULowerIntrinsicsPass(PassRegistry &);
 extern char &AMDGPULowerIntrinsicsID;
 
+void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &);
+extern char &AMDGPURewriteOutArgumentsID;
+
 void initializeSIFoldOperandsPass(PassRegistry &);
 extern char &SIFoldOperandsID;
 
Index: lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
===================================================================
--- /dev/null
+++ lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
@@ -0,0 +1,375 @@
+//===-- AMDGPURewriteOutArguments.cpp - Create struct returns ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This pass attempts to replace out argument usage with a return of a
+/// struct.
+///
+/// We can support returning a lot of values directly in registers, but
+/// idiomatic C code frequently uses a pointer argument to return a second
+/// value rather than returning a struct by value. GPU stack access is also
+/// quite painful, so we want to avoid that if possible. Passing a stack object
+/// pointer to a function also requires an additional address expansion code
+/// sequence to convert the pointer to be relative to the kernel's scratch wave
+/// offset register, since the callee doesn't know what stack frame the
+/// incoming pointer is relative to.
+///
+/// The goal is to try rewriting code that looks like this:
+///
+///   int foo(int a, int b, int *out) {
+///     *out = bar();
+///     return a + b;
+///   }
+///
+/// into something like this:
+///
+///   std::pair<int, int> foo(int a, int b) {
+///     return std::make_pair(a + b, bar());
+///   }
+///
+/// Typically the incoming pointer is a simple alloca for a temporary variable
+/// used to call the API; if it is replaced with a struct return, it will be
+/// easily SROA'd out when the stub function we create is inlined.
+///
+/// This pass introduces the struct return, but leaves the unused pointer
+/// arguments and introduces a new stub function calling the struct returning
+/// body. DeadArgumentElimination should be run after this to clean these up.
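+///
+/// As a rough IR-level sketch (the names @f, @f.body, and %f.ret are
+/// illustrative only, following the pattern checked in the accompanying
+/// tests), given %f.ret = type { i32 }, a function like:
+///
+///   define void @f(i32* %out) {
+///     store i32 0, i32* %out
+///     ret void
+///   }
+///
+/// becomes a struct-returning body plus an always-inline stub that leaves the
+/// now-dead pointer argument in place:
+///
+///   define private %f.ret @f.body(i32* %out) {
+///     ret %f.ret zeroinitializer
+///   }
+///
+///   define void @f(i32* %out) {
+///     %r = call %f.ret @f.body(i32* undef)
+///     %v = extractvalue %f.ret %r, 0
+///     store i32 %v, i32* %out
+///     ret void
+///   }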
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "Utils/AMDGPUBaseInfo.h"
+
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "amdgpu-rewrite-out-arguments"
+
+using namespace llvm;
+
+namespace {
+
+static cl::opt<bool> AnyAddressSpace(
+  "amdgpu-any-address-space-out-arguments",
+  cl::desc("Replace pointer out arguments with "
+           "struct returns for non-private address space"),
+  cl::Hidden,
+  cl::init(false));
+
+static cl::opt<unsigned> MaxNumRetRegs(
+  "amdgpu-max-return-arg-num-regs",
+  cl::desc("Approximately limit number of return registers for replacing out arguments"),
+  cl::Hidden,
+  cl::init(16));
+
+STATISTIC(NumOutArgumentsReplaced,
+          "Number of out arguments moved to struct return values");
+STATISTIC(NumOutArgumentFunctionsReplaced,
+          "Number of functions with out arguments moved to struct return values");
+
+class AMDGPURewriteOutArguments : public FunctionPass {
+private:
+  const DataLayout *DL = nullptr;
+  MemoryDependenceResults *MDA = nullptr;
+
+  bool isOutArgumentCandidate(Argument &Arg) const;
+
+public:
+  static char ID;
+
+  AMDGPURewriteOutArguments() :
+    FunctionPass(ID) {}
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<MemoryDependenceWrapperPass>();
+    FunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool doInitialization(Module &M) override;
+  bool runOnFunction(Function &F) override;
+};
+
+} // End anonymous namespace
+
+INITIALIZE_PASS_BEGIN(AMDGPURewriteOutArguments, DEBUG_TYPE,
+                      "AMDGPU Rewrite Out Arguments", false, false)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
+INITIALIZE_PASS_END(AMDGPURewriteOutArguments, DEBUG_TYPE,
+                    "AMDGPU Rewrite Out Arguments", false, false)
+
+char AMDGPURewriteOutArguments::ID = 0;
+
+bool AMDGPURewriteOutArguments::isOutArgumentCandidate(Argument &Arg) const {
+  const int MaxUses = 10;
+  const unsigned MaxOutArgSizeBytes = 4 * MaxNumRetRegs;
+  int UseCount = 0;
+
+  PointerType *ArgTy = dyn_cast<PointerType>(Arg.getType());
+
+  // TODO: It might be useful for any out arguments, not just privates.
+  if (!ArgTy || (ArgTy->getAddressSpace() != DL->getAllocaAddrSpace() &&
+                 !AnyAddressSpace) ||
+      Arg.hasByValAttr() || Arg.hasStructRetAttr() ||
+      DL->getTypeStoreSize(ArgTy->getPointerElementType()) > MaxOutArgSizeBytes) {
+    return false;
+  }
+
+  for (Use &U : Arg.uses()) {
+    StoreInst *SI = dyn_cast<StoreInst>(U.getUser());
+    if (UseCount > MaxUses)
+      return false;
+
+    if (!SI || !SI->isSimple() ||
+        U.getOperandNo() != StoreInst::getPointerOperandIndex())
+      return false;
+
+    ++UseCount;
+  }
+
+  // Skip unused arguments.
+  return UseCount > 0;
+}
+
+bool AMDGPURewriteOutArguments::doInitialization(Module &M) {
+  DL = &M.getDataLayout();
+  return false;
+}
+
+bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
+  if (skipFunction(F))
+    return false;
+
+  // TODO: Could probably handle variadic functions.
+  if (F.isVarArg() || F.hasStructRetAttr() ||
+      AMDGPU::isEntryFunctionCC(F.getCallingConv()))
+    return false;
+
+  MDA = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
+
+  unsigned ReturnNumRegs = 0;
+  SmallSet<int, 4> OutArgIndexes;
+  SmallVector<Type *, 4> ReturnTypes;
+  Type *RetTy = F.getReturnType();
+  if (!RetTy->isVoidTy()) {
+    ReturnNumRegs = DL->getTypeStoreSize(RetTy) / 4;
+
+    if (ReturnNumRegs >= MaxNumRetRegs)
+      return false;
+
+    ReturnTypes.push_back(RetTy);
+  }
+
+  SmallVector<Argument *, 4> OutArgs;
+  for (Argument &Arg : F.args()) {
+    if (isOutArgumentCandidate(Arg)) {
+      DEBUG(dbgs() << "Found possible out argument " << Arg
+                   << " in function " << F.getName() << '\n');
+      OutArgs.push_back(&Arg);
+    }
+  }
+
+  if (OutArgs.empty())
+    return false;
+
+  typedef SmallVector<std::pair<Argument *, Value *>, 4> ReplacementVec;
+  DenseMap<ReturnInst *, ReplacementVec> Replacements;
+
+  SmallVector<ReturnInst *, 4> Returns;
+  for (BasicBlock &BB : F) {
+    if (ReturnInst *RI = dyn_cast<ReturnInst>(&BB.back()))
+      Returns.push_back(RI);
+  }
+
+  if (Returns.empty())
+    return false;
+
+  bool Changing;
+
+  do {
+    Changing = false;
+
+    // Keep retrying if we are able to successfully eliminate an argument. This
+    // helps with cases with multiple arguments which may alias, such as in a
+    // sincos implementation. If we have 2 stores to arguments, on the first
+    // attempt the MDA query will succeed for the second store but not the
+    // first. On the second iteration we've removed that out clobbering argument
+    // (by effectively moving it into another function) and will find the second
+    // argument is OK to move.
+    for (Argument *OutArg : OutArgs) {
+      bool ThisReplaceable = true;
+      SmallVector<std::pair<ReturnInst *, StoreInst *>, 4> ReplaceableStores;
+
+      Type *ArgTy = OutArg->getType()->getPointerElementType();
+
+      // Skip this argument if converting it will push us over the limit on the
+      // number of registers used for return values.
+
+      // TODO: This is an approximation. When legalized this could be more. We
+      // can ask TLI for exactly how many.
+      unsigned ArgNumRegs = DL->getTypeStoreSize(ArgTy) / 4;
+      if (ArgNumRegs + ReturnNumRegs > MaxNumRetRegs)
+        continue;
+
+      // An argument is convertible only if all exit blocks are able to replace
+      // it.
+      for (ReturnInst *RI : Returns) {
+        BasicBlock *BB = RI->getParent();
+
+        MemDepResult Q = MDA->getPointerDependencyFrom(MemoryLocation(OutArg),
+                                                       true, BB->end(), BB, RI);
+        StoreInst *SI = nullptr;
+        if (Q.isDef())
+          SI = dyn_cast<StoreInst>(Q.getInst());
+
+        if (SI) {
+          DEBUG(dbgs() << "Found out argument store: " << *SI << '\n');
+          ReplaceableStores.emplace_back(RI, SI);
+        } else {
+          ThisReplaceable = false;
+          break;
+        }
+      }
+
+      if (!ThisReplaceable)
+        continue; // Try the next argument candidate.
+
+      for (std::pair<ReturnInst *, StoreInst *> Store : ReplaceableStores) {
+        Value *ReplVal = Store.second->getValueOperand();
+
+        auto &ValVec = Replacements[Store.first];
+        if (llvm::find_if(ValVec,
+                          [OutArg](const std::pair<Argument *, Value *> &Entry) {
+                            return Entry.first == OutArg;}) != ValVec.end()) {
+          DEBUG(dbgs() << "Saw multiple out arg stores " << *OutArg << '\n');
+          // It is possible to see stores to the same argument multiple times,
+          // but we expect these would have been optimized out already.
+          ThisReplaceable = false;
+          break;
+        }
+
+        ValVec.emplace_back(OutArg, ReplVal);
+        Store.second->eraseFromParent();
+      }
+
+      if (ThisReplaceable) {
+        ReturnTypes.push_back(ArgTy);
+        OutArgIndexes.insert(OutArg->getArgNo());
+        ++NumOutArgumentsReplaced;
+        Changing = true;
+      }
+    }
+  } while (Changing);
+
+  if (Replacements.empty())
+    return false;
+
+  LLVMContext &Ctx = F.getParent()->getContext();
+  StructType *NewRetTy = StructType::create(Ctx, ReturnTypes, F.getName());
+
+  FunctionType *NewFuncTy = FunctionType::get(NewRetTy,
+                                              F.getFunctionType()->params(),
+                                              F.isVarArg());
+
+  DEBUG(dbgs() << "Computed new return type: " << *NewRetTy << '\n');
+
+  Function *NewFunc = Function::Create(NewFuncTy, Function::PrivateLinkage,
+                                       F.getName() + ".body");
+  F.getParent()->getFunctionList().insert(F.getIterator(), NewFunc);
+  NewFunc->copyAttributesFrom(&F);
+  NewFunc->setComdat(F.getComdat());
+
+  // We want to preserve the function and param attributes, but need to strip
+  // off any return attributes, e.g. zeroext doesn't make sense with a struct.
+  NewFunc->stealArgumentListFrom(F);
+
+  AttrBuilder RetAttrs;
+  RetAttrs.addAttribute(Attribute::SExt);
+  RetAttrs.addAttribute(Attribute::ZExt);
+  RetAttrs.addAttribute(Attribute::NoAlias);
+  NewFunc->removeAttributes(AttributeList::ReturnIndex, RetAttrs);
+  // TODO: How to preserve metadata?
+
+  // Move the body of the function into the new rewritten function, and replace
+  // this function with a stub.
+  NewFunc->getBasicBlockList().splice(NewFunc->begin(), F.getBasicBlockList());
+
+  for (std::pair<ReturnInst *, ReplacementVec> &Replacement : Replacements) {
+    ReturnInst *RI = Replacement.first;
+    IRBuilder<> B(RI);
+    B.SetCurrentDebugLocation(RI->getDebugLoc());
+
+    int RetIdx = 0;
+    Value *NewRetVal = UndefValue::get(NewRetTy);
+
+    Value *RetVal = RI->getReturnValue();
+    if (RetVal)
+      NewRetVal = B.CreateInsertValue(NewRetVal, RetVal, RetIdx++);
+
+    for (std::pair<Argument *, Value *> ReturnPoint : Replacement.second) {
+      NewRetVal = B.CreateInsertValue(NewRetVal, ReturnPoint.second, RetIdx++);
+    }
+
+    if (RetVal)
+      RI->setOperand(0, NewRetVal);
+    else {
+      B.CreateRet(NewRetVal);
+      RI->eraseFromParent();
+    }
+  }
+
+  SmallVector<Value *, 16> StubCallArgs;
+  for (Argument &Arg : F.args()) {
+    if (OutArgIndexes.count(Arg.getArgNo())) {
+      // It's easier to preserve the type of the argument list. We rely on
+      // DeadArgumentElimination to take care of these.
+      StubCallArgs.push_back(UndefValue::get(Arg.getType()));
+    } else {
+      StubCallArgs.push_back(&Arg);
+    }
+  }
+
+  BasicBlock *StubBB = BasicBlock::Create(Ctx, "", &F);
+  IRBuilder<> B(StubBB);
+  CallInst *StubCall = B.CreateCall(NewFunc, StubCallArgs);
+
+  int RetIdx = RetTy->isVoidTy() ? 0 : 1;
+  for (Argument &Arg : F.args()) {
+    if (!OutArgIndexes.count(Arg.getArgNo()))
+      continue;
+
+    auto *EltTy = Arg.getType()->getPointerElementType();
+    unsigned Align = Arg.getParamAlignment();
+    if (Align == 0)
+      Align = DL->getABITypeAlignment(EltTy);
+
+    Value *Val = B.CreateExtractValue(StubCall, RetIdx++);
+    B.CreateAlignedStore(Val, &Arg, Align);
+  }
+
+  if (!RetTy->isVoidTy()) {
+    B.CreateRet(B.CreateExtractValue(StubCall, 0));
+  } else {
+    B.CreateRetVoid();
+  }
+
+  // The function is now a stub we want to inline.
+  F.addFnAttr(Attribute::AlwaysInline);
+
+  ++NumOutArgumentFunctionsReplaced;
+  return true;
+}
+
+FunctionPass *llvm::createAMDGPURewriteOutArgumentsPass() {
+  return new AMDGPURewriteOutArguments();
+}
Index: lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -66,6 +66,11 @@
 cl::desc("Run early if-conversion"),
 cl::init(false));
 
+static cl::opt<bool>
+EnableOutArg("amdgpu-out-arg", cl::Hidden,
+             cl::desc("Run the AMDGPU rewrite-out-arguments pass"),
+             cl::init(true));
+
 static cl::opt<bool> EnableR600IfConvert(
   "r600-if-convert",
   cl::desc("Use if conversion pass"),
@@ -143,6 +148,7 @@
   initializeAMDGPULowerIntrinsicsPass(*PR);
   initializeAMDGPUPromoteAllocaPass(*PR);
   initializeAMDGPUCodeGenPreparePass(*PR);
+  initializeAMDGPURewriteOutArgumentsPass(*PR);
   initializeAMDGPUUnifyMetadataPass(*PR);
   initializeSIAnnotateControlFlowPass(*PR);
   initializeSIInsertWaitsPass(*PR);
@@ -348,8 +354,16 @@
     [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
       // Add infer address spaces pass to the opt pipeline after inlining
       // but before SROA to increase SROA opportunities.
       PM.add(createInferAddressSpacesPass());
   });
+
+  Builder.addExtension(
+    PassManagerBuilder::EP_ScalarOptimizerLate,
+    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
+      if (EnableOutArg)
+        PM.add(createAMDGPURewriteOutArgumentsPass());
+  });
+
 }
 
 //===----------------------------------------------------------------------===//
Index: lib/Target/AMDGPU/CMakeLists.txt
===================================================================
--- lib/Target/AMDGPU/CMakeLists.txt
+++ lib/Target/AMDGPU/CMakeLists.txt
@@ -62,6 +62,7 @@
   AMDGPUPromoteAlloca.cpp
   AMDGPURegAsmNames.inc.cpp
   AMDGPURegisterInfo.cpp
+  AMDGPURewriteOutArguments.cpp
   AMDGPUUnifyDivergentExitNodes.cpp
   GCNHazardRecognizer.cpp
   GCNSchedStrategy.cpp
Index: test/CodeGen/AMDGPU/rewrite-out-arguments-address-space.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/rewrite-out-arguments-address-space.ll
@@ -0,0 +1,22 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-any-address-space-out-arguments -amdgpu-rewrite-out-arguments < %s | FileCheck %s
+
+
+; CHECK: %void_one_out_non_private_arg_i32_1_use = type { i32 }
+
+
+; CHECK-LABEL: define private %void_one_out_non_private_arg_i32_1_use @void_one_out_non_private_arg_i32_1_use.body(i32 addrspace(1)* %val) #0 {
+; CHECK-NEXT: ret %void_one_out_non_private_arg_i32_1_use zeroinitializer
+
+; CHECK-LABEL: define void @void_one_out_non_private_arg_i32_1_use(i32 addrspace(1)*) #1 {
+; CHECK-NEXT: %2 = call %void_one_out_non_private_arg_i32_1_use @void_one_out_non_private_arg_i32_1_use.body(i32 addrspace(1)* undef)
+; CHECK-NEXT: %3 = extractvalue %void_one_out_non_private_arg_i32_1_use %2, 0
+; CHECK-NEXT: store i32 %3, i32 addrspace(1)* %0, align 4
+; CHECK-NEXT: ret void
+define void @void_one_out_non_private_arg_i32_1_use(i32 addrspace(1)* %val) #0 {
+  store i32 0, i32 addrspace(1)* %val
+  ret void
+}
+
+; CHECK: attributes #0 = { nounwind }
+; CHECK: attributes #1 = { alwaysinline nounwind }
+attributes #0 = { nounwind }
Index: test/CodeGen/AMDGPU/rewrite-out-arguments.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/rewrite-out-arguments.ll
@@ -0,0 +1,563 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-rewrite-out-arguments < %s | FileCheck %s
+
+; CHECK: %void_one_out_arg_i32_1_use = type { i32 }
+; CHECK: %void_one_out_arg_i32_1_use_align = type { i32 }
+; CHECK: %void_one_out_arg_i32_2_use = type { i32 }
+; CHECK: %void_one_out_arg_i32_2_stores = type { i32 }
+; CHECK: %void_one_out_arg_i32_2_stores_clobber = type { i32 }
+; CHECK: %void_one_out_arg_i32_pre_call_may_clobber = type { i32 }
+; CHECK: %void_one_out_arg_v2i32_1_use = type { <2 x i32> }
+; CHECK: %void_one_out_arg_struct_1_use = type { %struct }
+; CHECK: %struct = type { i32, i8, float }
+; CHECK: %i32_one_out_arg_i32_1_use = type { i32, i32 }
+; CHECK: %unused_different_type = type { float }
+; CHECK: %multiple_same_return_noalias = type { i32, i32 }
+; CHECK: %multiple_same_return_mayalias = type { i32, i32 }
+; CHECK: %multiple_same_return_mayalias_order = type { i32, i32 }
+; CHECK: %i1_one_out_arg_i32_1_use = type { i1, i32 }
+; CHECK: %i1_zeroext_one_out_arg_i32_1_use = type { i1, i32 }
+; CHECK: %i1_signext_one_out_arg_i32_1_use = type { i1, i32 }
+; CHECK: %p1i32_noalias_one_out_arg_i32_1_use = type { i32 addrspace(1)*, i32 }
+; CHECK: %func_ptr_type = type { void ()* }
+; CHECK: %out_arg_small_array = type { [4 x i32] }
+; CHECK: %num_regs_reach_limit = type { [15 x i32], i32 }
+; CHECK: %num_regs_reach_limit_leftover = type { [15 x i32], i32, i32 }
+; CHECK: %preserve_debug_info = type { i32 }
+; CHECK: %preserve_metadata = type { i32 }
+
+; CHECK-LABEL: define void @no_ret_blocks() #0 {
+; CHECK-NEXT: unreachable
+define void @no_ret_blocks() #0 {
+  unreachable
+}
+
+; CHECK-LABEL: @void_one_out_arg_i32_no_use(
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_i32_no_use(i32* %val) #0 {
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define void @skip_byval_arg(
+; CHECK-NEXT: store i32 0, i32* %val
+; CHECK-NEXT: ret void
+define void @skip_byval_arg(i32* byval %val) #0 {
+  store i32 0, i32* %val
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define void @skip_optnone(
+; CHECK-NEXT: store i32 0, i32* %val
+; CHECK-NEXT: ret void
+define void @skip_optnone(i32* byval %val) #1 {
+  store i32 0, i32* %val
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define void @skip_volatile(
+; CHECK-NEXT: store volatile i32 0, i32* %val
+; CHECK-NEXT: ret void
+define void @skip_volatile(i32* byval %val) #0 {
+  store volatile i32 0, i32* %val
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define void @skip_atomic(
+; CHECK-NEXT: store atomic i32 0, i32* %val
+; CHECK-NEXT: ret void
+define void @skip_atomic(i32* byval %val) #0 {
+  store atomic i32 0, i32* %val seq_cst, align 4
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define void @skip_store_pointer_val(
+; CHECK-NEXT: store i32* %val, i32** undef
+; CHECK-NEXT: ret void
+define void @skip_store_pointer_val(i32* %val) #0 {
+  store i32* %val, i32** undef
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define void @skip_store_gep(
+; CHECK-NEXT: %gep = getelementptr inbounds i32, i32* %val, i32 1
+; CHECK-NEXT: store i32 0, i32* %gep
+; CHECK-NEXT: ret void
+define void @skip_store_gep(i32* %val) #0 {
+  %gep = getelementptr inbounds i32, i32* %val, i32 1
+  store i32 0, i32* %gep
+  ret void
+}
+
+; CHECK-LABEL: define void @skip_sret(i32* sret %sret, i32* %out) #0 {
+; CHECK-NEXT: store
+; CHECK-NEXT: store
+; CHECK-NEXT: ret void
+define void @skip_sret(i32* sret %sret, i32* %out) #0 {
+  store i32 1, i32* %sret
+  store i32 0, i32* %out
+  ret void
+}
+
+; CHECK-LABEL: define private %void_one_out_arg_i32_1_use @void_one_out_arg_i32_1_use.body(i32* %val) #0 {
+; CHECK-NEXT: ret %void_one_out_arg_i32_1_use zeroinitializer
+
+; CHECK-LABEL: @void_one_out_arg_i32_1_use(
+; CHECK-NEXT: %2 = call %void_one_out_arg_i32_1_use @void_one_out_arg_i32_1_use.body(i32* undef)
+; CHECK-NEXT: %3 = extractvalue %void_one_out_arg_i32_1_use %2, 0
+; CHECK-NEXT: store i32 %3, i32* %0, align 4
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_i32_1_use(i32* %val) #0 {
+  store i32 0, i32* %val
+  ret void
+}
+
+; CHECK-LABEL: define private %void_one_out_arg_i32_1_use_align @void_one_out_arg_i32_1_use_align.body(i32* align 8 %val) #0 {
+; CHECK-NEXT: ret %void_one_out_arg_i32_1_use_align zeroinitializer
+
+; CHECK-LABEL: @void_one_out_arg_i32_1_use_align(
+; CHECK-NEXT: %2 = call %void_one_out_arg_i32_1_use_align @void_one_out_arg_i32_1_use_align.body(i32* undef)
+; CHECK-NEXT: %3 = extractvalue %void_one_out_arg_i32_1_use_align %2, 0
+; CHECK-NEXT: store i32 %3, i32* %0, align 8
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_i32_1_use_align(i32* align 8 %val) #0 {
+  store i32 0, i32* %val, align 8
+  ret void
+}
+
+; CHECK-LABEL: define private %void_one_out_arg_i32_2_use @void_one_out_arg_i32_2_use.body(i1 %arg0, i32* %val) #0 {
+; CHECK: br i1 %arg0, label %ret0, label %ret1
+
+; CHECK: ret0:
+; CHECK-NEXT: ret %void_one_out_arg_i32_2_use zeroinitializer
+
+; CHECK: ret1:
+; CHECK-NEXT: ret %void_one_out_arg_i32_2_use { i32 9 }
+
+; CHECK-LABEL: define void @void_one_out_arg_i32_2_use(i1, i32*) #2 {
+; CHECK-NEXT: %3 = call %void_one_out_arg_i32_2_use @void_one_out_arg_i32_2_use.body(i1 %0, i32* undef)
+; CHECK-NEXT: %4 = extractvalue %void_one_out_arg_i32_2_use %3, 0
+; CHECK-NEXT: store i32 %4, i32* %1, align 4
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_i32_2_use(i1 %arg0, i32* %val) #0 {
+  br i1 %arg0, label %ret0, label %ret1
+
+ret0:
+  store i32 0, i32* %val
+  ret void
+
+ret1:
+  store i32 9, i32* %val
+  ret void
+}
+
+declare void @may.clobber()
+
+; CHECK-LABEL: define private %void_one_out_arg_i32_2_stores @void_one_out_arg_i32_2_stores.body(i32* %val) #0 {
+; CHECK-NEXT: store i32 0, i32* %val
+; CHECK-NEXT: ret %void_one_out_arg_i32_2_stores { i32 1 }
+
+; CHECK-LABEL: define void @void_one_out_arg_i32_2_stores(i32*) #2 {
+; CHECK-NEXT: %2 = call %void_one_out_arg_i32_2_stores @void_one_out_arg_i32_2_stores.body(i32* undef)
+; CHECK-NEXT: %3 = extractvalue %void_one_out_arg_i32_2_stores %2, 0
+; CHECK-NEXT: store i32 %3, i32* %0, align 4
+define void @void_one_out_arg_i32_2_stores(i32* %val) #0 {
+  store i32 0, i32* %val
+  store i32 1, i32* %val
+  ret void
+}
+
+; CHECK-LABEL: define private %void_one_out_arg_i32_2_stores_clobber @void_one_out_arg_i32_2_stores_clobber.body(i32* %val) #0 {
+; CHECK-NEXT: store i32 0, i32* %val
+; CHECK-NEXT: call void @may.clobber()
+; CHECK-NEXT: ret %void_one_out_arg_i32_2_stores_clobber { i32 1 }
+
+; CHECK-LABEL: define void @void_one_out_arg_i32_2_stores_clobber(i32*) #2 {
+; CHECK-NEXT: %2 = call %void_one_out_arg_i32_2_stores_clobber @void_one_out_arg_i32_2_stores_clobber.body(i32* undef)
+; CHECK-NEXT: %3 = extractvalue %void_one_out_arg_i32_2_stores_clobber %2, 0
+; CHECK-NEXT: store i32 %3, i32* %0, align 4
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_i32_2_stores_clobber(i32* %val) #0 {
+  store i32 0, i32* %val
+  call void @may.clobber()
+  store i32 1, i32* %val
+  ret void
+}
+
+; CHECK-NOT: define
+
+; CHECK-LABEL: define void @void_one_out_arg_i32_call_may_clobber(i32* %val) #0 {
+; CHECK-NEXT: store i32 0, i32* %val
+; CHECK-NEXT: call void @may.clobber()
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_i32_call_may_clobber(i32* %val) #0 {
+  store i32 0, i32* %val
+  call void @may.clobber()
+  ret void
+}
+
+; CHECK-LABEL: define private %void_one_out_arg_i32_pre_call_may_clobber @void_one_out_arg_i32_pre_call_may_clobber.body(i32* %val) #0 {
+; CHECK-NEXT: call void @may.clobber()
+; CHECK-NEXT: ret %void_one_out_arg_i32_pre_call_may_clobber zeroinitializer
+
+; CHECK-LABEL: @void_one_out_arg_i32_pre_call_may_clobber(i32*) #2 {
+; CHECK-NEXT: %2 = call %void_one_out_arg_i32_pre_call_may_clobber @void_one_out_arg_i32_pre_call_may_clobber.body(i32* undef)
+; CHECK-NEXT: %3 = extractvalue %void_one_out_arg_i32_pre_call_may_clobber %2, 0
+; CHECK-NEXT: store i32 %3, i32* %0, align 4
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_i32_pre_call_may_clobber(i32* %val) #0 {
+  call void @may.clobber()
+  store i32 0, i32* %val
+  ret void
+}
+
+; CHECK-LABEL: define void @void_one_out_arg_i32_reload(i32* %val) #0 {
+; CHECK: store i32 0, i32* %val
+; CHECK: %load = load i32, i32* %val, align 4
+; CHECK: ret void
+define void @void_one_out_arg_i32_reload(i32* %val) #0 {
+  store i32 0, i32* %val
+  %load = load i32, i32* %val, align 4
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define void @void_one_out_arg_i32_store_in_different_block(
+; CHECK-NEXT: %load = load i32, i32 addrspace(1)* undef
+; CHECK-NEXT: store i32 0, i32* %out
+; CHECK-NEXT: br label %ret
+; CHECK: ret:
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_i32_store_in_different_block(i32* %out) #0 {
+  %load = load i32, i32 addrspace(1)* undef
+  store i32 0, i32* %out
+  br label %ret
+
+ret:
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define void @unused_out_arg_one_branch(
+; CHECK: ret0:
+; CHECK-NEXT: ret void
+
+; CHECK: ret1:
+; CHECK-NEXT: store i32 9, i32* %val
+; CHECK-NEXT: ret void
+define void @unused_out_arg_one_branch(i1 %arg0, i32* %val) #0 {
+  br i1 %arg0, label %ret0, label %ret1
+
+ret0:
+  ret void
+
+ret1:
+  store i32 9, i32* %val
+  ret void
+}
+
+; CHECK-LABEL: define private %void_one_out_arg_v2i32_1_use @void_one_out_arg_v2i32_1_use.body(<2 x i32>* %val) #0 {
+; CHECK-NEXT: ret %void_one_out_arg_v2i32_1_use { <2 x i32> <i32 17, i32 9> }
+
+; CHECK-LABEL: define void @void_one_out_arg_v2i32_1_use(<2 x i32>*) #2 {
+; CHECK-NEXT: %2 = call %void_one_out_arg_v2i32_1_use @void_one_out_arg_v2i32_1_use.body(<2 x i32>* undef)
+; CHECK-NEXT: %3 = extractvalue %void_one_out_arg_v2i32_1_use %2, 0
+; CHECK-NEXT: store <2 x i32> %3, <2 x i32>* %0, align 8
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_v2i32_1_use(<2 x i32>* %val) #0 {
+  store <2 x i32> <i32 17, i32 9>, <2 x i32>* %val
+  ret void
+}
+
+%struct = type { i32, i8, float }
+
+; CHECK-LABEL: define private %void_one_out_arg_struct_1_use @void_one_out_arg_struct_1_use.body(%struct* %out) #0 {
+; CHECK-NEXT: ret %void_one_out_arg_struct_1_use { %struct { i32 9, i8 99, float 4.000000e+00 } }
+
+; Normally this is split into element accesses, which we don't handle.
+; CHECK-LABEL: define void @void_one_out_arg_struct_1_use(%struct*) #2 {
+; CHECK-NEXT: %2 = call %void_one_out_arg_struct_1_use @void_one_out_arg_struct_1_use.body(%struct* undef)
+; CHECK-NEXT: %3 = extractvalue %void_one_out_arg_struct_1_use %2, 0
+; CHECK-NEXT: store %struct %3, %struct* %0, align 4
+; CHECK-NEXT: ret void
+define void @void_one_out_arg_struct_1_use(%struct* %out) #0 {
+  store %struct { i32 9, i8 99, float 4.0 }, %struct* %out
+  ret void
+}
+
+; CHECK-LABEL: define private %i32_one_out_arg_i32_1_use @i32_one_out_arg_i32_1_use.body(i32* %val) #0 {
+; CHECK-NEXT: ret %i32_one_out_arg_i32_1_use { i32 9, i32 24 }
+
+; CHECK-LABEL: define i32 @i32_one_out_arg_i32_1_use(i32*) #2 {
+; CHECK-NEXT: %2 = call %i32_one_out_arg_i32_1_use @i32_one_out_arg_i32_1_use.body(i32* undef)
+; CHECK-NEXT: %3 = extractvalue %i32_one_out_arg_i32_1_use %2, 1
+; CHECK-NEXT: store i32 %3, i32* %0, align 4
+; CHECK-NEXT: %4 = extractvalue %i32_one_out_arg_i32_1_use %2, 0
+; CHECK-NEXT: ret i32 %4
+define i32 @i32_one_out_arg_i32_1_use(i32* %val) #0 {
+  store i32 24, i32* %val
+  ret i32 9
+}
+
+; CHECK-LABEL: define private %unused_different_type @unused_different_type.body(i32* %arg0, float* nocapture %arg1) #0 {
+; CHECK-NEXT: ret %unused_different_type { float 4.000000e+00 }
+
+; CHECK-LABEL: define void @unused_different_type(i32*, float* nocapture) #2 {
+; CHECK-NEXT: %3 = call %unused_different_type @unused_different_type.body(i32* %0, float* undef)
+; CHECK-NEXT: %4 = extractvalue %unused_different_type %3, 0
+; CHECK-NEXT: store float %4, float* %1, align 4
+; CHECK-NEXT: ret void
+define void @unused_different_type(i32* %arg0, float* nocapture %arg1) #0 {
+  store float 4.0, float* %arg1, align 4
+  ret void
+}
+
+; CHECK-LABEL: define private %multiple_same_return_noalias @multiple_same_return_noalias.body(i32* noalias %out0, i32* noalias %out1) #0 {
+; CHECK-NEXT: ret %multiple_same_return_noalias { i32 1, i32 2 }
+
+; CHECK-LABEL: define void @multiple_same_return_noalias(
+; CHECK-NEXT: %3 = call %multiple_same_return_noalias @multiple_same_return_noalias.body(i32* undef, i32* undef)
+; CHECK-NEXT: %4 = extractvalue %multiple_same_return_noalias %3, 0
+; CHECK-NEXT: store i32 %4, i32* %0, align 4
+; CHECK-NEXT: %5 = extractvalue %multiple_same_return_noalias %3, 1
+; CHECK-NEXT: store i32 %5, i32* %1, align 4
+; CHECK-NEXT: ret void
+define void @multiple_same_return_noalias(i32* noalias %out0, i32* noalias %out1) #0 {
+  store i32 1, i32* %out0, align 4
+  store i32 2, i32* %out1, align 4
+  ret void
+}
+
+; CHECK-LABEL: define private %multiple_same_return_mayalias @multiple_same_return_mayalias.body(i32* %out0, i32* %out1) #0 {
+; CHECK-NEXT: ret %multiple_same_return_mayalias { i32 2, i32 1 }
+
+; CHECK-LABEL: define void @multiple_same_return_mayalias(i32*, i32*) #2 {
+; CHECK-NEXT: %3 = call %multiple_same_return_mayalias @multiple_same_return_mayalias.body(i32* undef, i32* undef)
+; CHECK-NEXT: %4 = extractvalue %multiple_same_return_mayalias %3, 0
+; CHECK-NEXT: store i32 %4, i32* %0, align 4
+; CHECK-NEXT: %5 = extractvalue %multiple_same_return_mayalias %3, 1
+; CHECK-NEXT: store i32 %5, i32* %1, align 4
+; CHECK-NEXT: ret void
+define void @multiple_same_return_mayalias(i32* %out0, i32* %out1) #0 {
+  store i32 1, i32* %out0, align 4
+  store i32 2, i32* %out1, align 4
+  ret void
+}
+
+; CHECK-LABEL: define private %multiple_same_return_mayalias_order @multiple_same_return_mayalias_order.body(i32* %out0, i32* %out1) #0 {
+; CHECK-NEXT: ret %multiple_same_return_mayalias_order { i32 1, i32 2 }
+
+; CHECK-LABEL: define void @multiple_same_return_mayalias_order(i32*, i32*) #2 {
+; CHECK-NEXT: %3 = call %multiple_same_return_mayalias_order @multiple_same_return_mayalias_order.body(i32* undef, i32* undef)
+; CHECK-NEXT: %4 = extractvalue %multiple_same_return_mayalias_order %3, 0
+; CHECK-NEXT: store i32 %4, i32* %0, align 4
+; CHECK-NEXT: %5 = extractvalue %multiple_same_return_mayalias_order %3, 1
+; CHECK-NEXT: store i32 %5, i32* %1, align 4
+; CHECK-NEXT: ret void
+define void @multiple_same_return_mayalias_order(i32* %out0, i32* %out1) #0 {
+  store i32 2, i32* %out1, align 4
+  store i32 1, i32* %out0, align 4
+  ret void
+}
+
+; Currently this fails to convert because the store won't be found if
+; it isn't in the same block as the return.
+; CHECK-LABEL: define i32 @store_in_entry_block(i1 %arg0, i32* %out) #0 {
+; CHECK-NOT: call
+define i32 @store_in_entry_block(i1 %arg0, i32* %out) #0 {
+entry:
+  %val0 = load i32, i32 addrspace(1)* undef
+  store i32 %val0, i32* %out
+  br i1 %arg0, label %if, label %endif
+
+if:
+  %val1 = load i32, i32 addrspace(1)* undef
+  br label %endif
+
+endif:
+  %phi = phi i32 [ 0, %entry ], [ %val1, %if ]
+  ret i32 %phi
+}
+
+; CHECK-LABEL: define private %i1_one_out_arg_i32_1_use @i1_one_out_arg_i32_1_use.body(i32* %val) #0 {
+; CHECK-NEXT: ret %i1_one_out_arg_i32_1_use { i1 true, i32 24 }
+
+; CHECK-LABEL: define i1 @i1_one_out_arg_i32_1_use(i32*) #2 {
+; CHECK: %2 = call %i1_one_out_arg_i32_1_use @i1_one_out_arg_i32_1_use.body(i32* undef)
+; CHECK: %3 = extractvalue %i1_one_out_arg_i32_1_use %2, 1
+; CHECK: store i32 %3, i32* %0, align 4
+; CHECK: %4 = extractvalue %i1_one_out_arg_i32_1_use %2, 0
+; CHECK: ret i1 %4
+define i1 @i1_one_out_arg_i32_1_use(i32* %val) #0 {
+  store i32 24, i32* %val
+  ret i1 true
+}
+
+; Make sure we don't leave around return attributes that are
+; incompatible with struct return types.
+
+; CHECK-LABEL: define private %i1_zeroext_one_out_arg_i32_1_use @i1_zeroext_one_out_arg_i32_1_use.body(i32* %val) #0 {
+; CHECK-NEXT: ret %i1_zeroext_one_out_arg_i32_1_use { i1 true, i32 24 }
+
+; CHECK-LABEL: define zeroext i1 @i1_zeroext_one_out_arg_i32_1_use(i32*) #2 {
+; CHECK-NEXT: %2 = call %i1_zeroext_one_out_arg_i32_1_use @i1_zeroext_one_out_arg_i32_1_use.body(i32* undef)
+; CHECK-NEXT: %3 = extractvalue %i1_zeroext_one_out_arg_i32_1_use %2, 1
+; CHECK-NEXT: store i32 %3, i32* %0, align 4
+; CHECK-NEXT: %4 = extractvalue %i1_zeroext_one_out_arg_i32_1_use %2, 0
+; CHECK-NEXT: ret i1 %4
+define zeroext i1 @i1_zeroext_one_out_arg_i32_1_use(i32* %val) #0 {
+  store i32 24, i32* %val
+  ret i1 true
+}
+
+; CHECK-LABEL: define private %i1_signext_one_out_arg_i32_1_use @i1_signext_one_out_arg_i32_1_use.body(i32* %val) #0 {
+; CHECK-NEXT: ret %i1_signext_one_out_arg_i32_1_use { i1 true, i32 24 }
+
+; CHECK-LABEL: define signext i1 @i1_signext_one_out_arg_i32_1_use(i32*) #2 {
+; CHECK-NEXT: %2 = call %i1_signext_one_out_arg_i32_1_use @i1_signext_one_out_arg_i32_1_use.body(i32* undef)
+; CHECK-NEXT: %3 = extractvalue %i1_signext_one_out_arg_i32_1_use %2, 1
+; CHECK-NEXT: store i32 %3, i32* %0, align 4
+; CHECK-NEXT: %4 = extractvalue %i1_signext_one_out_arg_i32_1_use %2, 0
+; CHECK-NEXT: ret i1 %4
+define signext i1 @i1_signext_one_out_arg_i32_1_use(i32* %val) #0 {
+  store i32 24, i32* %val
+  ret i1 true
+}
+
+; CHECK-LABEL: define private %p1i32_noalias_one_out_arg_i32_1_use @p1i32_noalias_one_out_arg_i32_1_use.body(i32* %val) #0 {
+; CHECK-NEXT: ret %p1i32_noalias_one_out_arg_i32_1_use { i32 addrspace(1)* null, i32 24 }
+
+; CHECK-LABEL: define noalias i32 addrspace(1)* @p1i32_noalias_one_out_arg_i32_1_use(i32*) #2 {
+; CHECK-NEXT: %2 = call %p1i32_noalias_one_out_arg_i32_1_use @p1i32_noalias_one_out_arg_i32_1_use.body(i32* undef)
+; CHECK-NEXT: %3 = extractvalue %p1i32_noalias_one_out_arg_i32_1_use %2, 1
+; CHECK-NEXT: store i32 %3, i32* %0, align 4
+; CHECK-NEXT: %4 = extractvalue %p1i32_noalias_one_out_arg_i32_1_use %2, 0
+; CHECK-NEXT: ret i32 addrspace(1)* %4
+define noalias i32 addrspace(1)* @p1i32_noalias_one_out_arg_i32_1_use(i32* %val) #0 {
+  store i32 24, i32* %val
+  ret i32 addrspace(1)* null
+}
+
+; CHECK-LABEL: define void @void_one_out_non_private_arg_i32_1_use(i32 addrspace(1)* %val) #0 {
+; CHECK-NEXT: store i32 0, i32 addrspace(1)* %val
+; CHECK-NEXT: ret void
+define void @void_one_out_non_private_arg_i32_1_use(i32 addrspace(1)* %val) #0 {
+  store i32 0, i32 addrspace(1)* %val
+  ret void
+}
+
+; CHECK-LABEL: define private %func_ptr_type @func_ptr_type.body(void ()** %out) #0 {
+; CHECK-LABEL: define void @func_ptr_type(void ()**) #2 {
+; CHECK: %2 = call %func_ptr_type @func_ptr_type.body(void ()** undef)
+define void @func_ptr_type(void()** %out) #0 {
+  %func = load void()*, void()** undef
+  store void()* %func, void()** %out
+  ret void
+}
+
+; CHECK-LABEL: define private %out_arg_small_array @out_arg_small_array.body([4 x i32]* %val) #0 {
+; CHECK-NEXT: ret %out_arg_small_array { [4 x i32] [i32 0, i32 1, i32 2, i32 3] }
+
+; CHECK-LABEL: define void @out_arg_small_array([4 x i32]*) #2 {
+define void @out_arg_small_array([4 x i32]* %val) #0 {
+  store [4 x i32] [i32 0, i32 1, i32 2, i32 3], [4 x i32]* %val
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define void @out_arg_large_array([17 x i32]* %val) #0 {
+; CHECK-NEXT: store [17 x i32] zeroinitializer, [17 x i32]* %val
+; CHECK-NEXT: ret void
+define void @out_arg_large_array([17 x i32]* %val) #0 {
+  store [17 x i32] zeroinitializer, [17 x i32]* %val
+  ret void
+}
+
+; CHECK-NOT: define
+; CHECK-LABEL: define <16 x i32> @num_regs_return_limit(i32* %out, i32 %val) #0 {
+define <16 x i32> @num_regs_return_limit(i32* %out, i32 %val) #0 {
+  %load = load volatile <16 x i32>, <16 x i32> addrspace(1)* undef
+  store i32 %val, i32* %out
+  ret <16 x i32> %load
+}
+
+; CHECK-LABEL: define private %num_regs_reach_limit @num_regs_reach_limit.body(i32* %out, i32 %val) #0 {
+; CHECK: define [15 x i32] @num_regs_reach_limit(i32*, i32) #2 {
+; CHECK-NEXT: call %num_regs_reach_limit @num_regs_reach_limit.body(i32* undef, i32 %1)
+define [15 x i32] @num_regs_reach_limit(i32* %out, i32 %val) #0 {
+  %load = load volatile [15 x i32], [15 x i32] addrspace(1)* undef
+  store i32 %val, i32* %out
+  ret [15 x i32] %load
+}
+
+; CHECK-LABEL: define private %num_regs_reach_limit_leftover @num_regs_reach_limit_leftover.body(i32* %out0, i32* %out1, i32 %val0) #0 {
+; CHECK-NEXT: %load0 = load volatile [15 x i32], [15 x i32] addrspace(1)* undef
+; CHECK-NEXT: %load1 = load volatile i32, i32 addrspace(1)* undef
+; CHECK-NEXT: %1 = insertvalue %num_regs_reach_limit_leftover undef, [15 x i32] %load0, 0
+; CHECK-NEXT: %2 = insertvalue %num_regs_reach_limit_leftover %1, i32 %load1, 1
+; CHECK-NEXT: %3 = insertvalue %num_regs_reach_limit_leftover %2, i32 %val0, 2
+; CHECK-NEXT: ret %num_regs_reach_limit_leftover %3
+
+; CHECK-LABEL: define [15 x i32] @num_regs_reach_limit_leftover(i32*, i32*, i32) #2 {
+; CHECK-NEXT: %4 = call %num_regs_reach_limit_leftover @num_regs_reach_limit_leftover.body(i32* undef, i32* undef, i32 %2)
+; CHECK-NEXT: %5 = extractvalue %num_regs_reach_limit_leftover %4, 1
+; CHECK-NEXT: store i32 %5, i32* %0, align 4
+; CHECK-NEXT: %6 = extractvalue %num_regs_reach_limit_leftover %4, 2
+; CHECK-NEXT: store i32 %6, i32* %1, align 4
+; CHECK-NEXT: %7 = extractvalue %num_regs_reach_limit_leftover %4, 0
+; CHECK-NEXT: ret [15 x i32] %7
+define [15 x i32] @num_regs_reach_limit_leftover(i32* %out0, i32* %out1, i32 %val0) #0 {
+  %load0 = load volatile [15 x i32], [15 x i32] addrspace(1)* undef
+  %load1 = load volatile i32, i32 addrspace(1)* undef
+  store i32 %val0, i32* %out0
+  store i32 %load1, i32* %out1
+  ret [15 x i32] %load0
+}
+
+; CHECK-LABEL: define private %preserve_debug_info @preserve_debug_info.body(i32 %arg0, i32* %val) #0 {
+; CHECK-NEXT: call void @may.clobber(), !dbg !5
+; CHECK-NEXT: %1 = insertvalue %preserve_debug_info undef, i32 %arg0, 0, !dbg !11
+; CHECK-NEXT: ret %preserve_debug_info %1, !dbg !11
+
+; CHECK-LABEL: define void @preserve_debug_info(i32, i32*) #2 !dbg !6 {
+; CHECK-NEXT: %3 = call %preserve_debug_info @preserve_debug_info.body(i32 %0, i32* undef){{$}}
+; CHECK-NEXT: %4 = extractvalue %preserve_debug_info %3, 0{{$}}
+; CHECK-NEXT: store i32 %4, i32* %1, align 4{{$}}
+; CHECK-NEXT: ret void
+define void @preserve_debug_info(i32 %arg0, i32* %val) #0 !dbg !5 {
+  call void @may.clobber(), !dbg !10
+  store i32 %arg0, i32* %val, !dbg !11
+  ret void, !dbg !12
+}
+
+define void @preserve_metadata(i32 %arg0, i32* %val) #0 !kernel_arg_access_qual !13 {
+  call void @may.clobber()
+  store i32 %arg0, i32* %val
+  ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind noinline optnone }
+attributes #2 = { alwaysinline nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "code-object-metadata-kernel-debug-props.cl", directory: "/some/random/directory")
"code-object-metadata-kernel-debug-props.cl", directory: "/some/random/directory") +!2 = !{} +!3 = !{i32 2, !"Dwarf Version", i32 2} +!4 = !{i32 2, !"Debug Info Version", i32 3} +!5 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2) +!6 = !DISubroutineType(types: !7) +!7 = !{null, !8} +!8 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !9, size: 64) +!9 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!10 = !DILocation(line: 2, column: 3, scope: !5) +!11 = !DILocation(line: 2, column: 8, scope: !5) +!12 = !DILocation(line: 3, column: 3, scope: !5) +!13 = !{!"none"}