Index: include/llvm/CodeGen/MachineFrameInfo.h
===================================================================
--- include/llvm/CodeGen/MachineFrameInfo.h
+++ include/llvm/CodeGen/MachineFrameInfo.h
@@ -559,8 +559,7 @@
     return Objects[ObjectIdx+NumFixedObjects].isAliased;
   }
 
-  /// isImmutableObjectIndex - Returns true if the specified index corresponds
-  /// to an immutable object.
+  /// Returns true if the specified index corresponds to an immutable object.
   bool isImmutableObjectIndex(int ObjectIdx) const {
     // Tail calling functions can clobber their function arguments.
     if (HasTailCall)
Index: include/llvm/CodeGen/SelectionDAGISel.h
===================================================================
--- include/llvm/CodeGen/SelectionDAGISel.h
+++ include/llvm/CodeGen/SelectionDAGISel.h
@@ -54,6 +54,7 @@
   const TargetInstrInfo *TII;
   const TargetLowering *TLI;
   bool FastISelFailed;
+  SmallPtrSet<const Instruction *, 4> ElidedArgCopyInstrs;
 
   static char ID;
Index: include/llvm/Target/TargetCallingConv.h
===================================================================
--- include/llvm/Target/TargetCallingConv.h
+++ include/llvm/Target/TargetCallingConv.h
@@ -45,6 +45,7 @@
     unsigned OrigAlign : 5;              ///< Log 2 of original alignment
     unsigned IsInConsecutiveRegsLast : 1;
     unsigned IsInConsecutiveRegs : 1;
+    unsigned IsCopyElisionCandidate : 1; ///< Argument copy elision candidate
 
     unsigned ByValSize; ///< Byval struct size
 
@@ -54,7 +55,8 @@
           IsReturned(0), IsSplit(0), IsInAlloca(0), IsSplitEnd(0),
           IsSwiftSelf(0), IsSwiftError(0), IsHva(0), IsHvaStart(0),
           IsSecArgPass(0), ByValAlign(0), OrigAlign(0),
-          IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0), ByValSize(0) {
+          IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0),
+          IsCopyElisionCandidate(0), ByValSize(0) {
       static_assert(sizeof(*this) == 2 * sizeof(unsigned), "flags are too big");
     }
 
@@ -109,6 +111,9 @@
     bool isSplitEnd() const { return IsSplitEnd; }
     void setSplitEnd() { IsSplitEnd = 1; }
 
+    bool isCopyElisionCandidate() const { return IsCopyElisionCandidate; }
+    void setCopyElisionCandidate() { IsCopyElisionCandidate = 1; }
+
     unsigned getByValAlign() const { return (1U << ByValAlign) / 2; }
     void setByValAlign(unsigned A) {
       ByValAlign = Log2_32(A) + 1;
Index: lib/CodeGen/AsmPrinter/DwarfDebug.h
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -85,7 +85,7 @@
     assert(!MInsn && "Already initialized?");
     assert((!E || E->isValid()) && "Expected valid expression");
-    assert(~FI && "Expected valid index");
+    assert(FI != INT_MAX && "Expected valid index");
 
     Expr.push_back(E);
     FrameIndex.push_back(FI);
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8050,6 +8050,137 @@
   return true;
 }
 
+typedef DenseMap<const Argument *,
+                 std::pair<const AllocaInst *, const StoreInst *>>
+    ArgCopyElisionMapTy;
+
+/// Scan the entry block of the function in FuncInfo for arguments that look
+/// like copies into a local alloca. Record any copied arguments in
+/// ArgCopyElisionCandidates.
+static void
+findArgumentCopyElisionCandidates(const DataLayout &DL,
+                                  FunctionLoweringInfo *FuncInfo,
+                                  ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
+  // Record the state of every static alloca used in the entry block. Argument
+  // allocas are all used in the entry block, so we need approximately as many
+  // entries as we have arguments.
+  enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
+  SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
+  unsigned NumArgs = FuncInfo->Fn->getArgumentList().size();
+  StaticAllocas.reserve(NumArgs * 2);
+
+  auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
+    if (!V)
+      return nullptr;
+    V = V->stripPointerCasts();
+    const auto *AI = dyn_cast<AllocaInst>(V);
+    if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
+      return nullptr;
+    auto Iter = StaticAllocas.insert({AI, Unknown});
+    return &Iter.first->second;
+  };
+
+  // Look for stores of arguments to static allocas. Look through bitcasts and
+  // GEPs to handle type coercions, as long as the alloca is fully initialized
+  // by the store. Any non-store use of an alloca escapes it and any subsequent
+  // unanalyzed store might write it.
+  // FIXME: Handle structs initialized with multiple stores.
+  for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
+    // Look for stores, and handle non-store uses conservatively.
+    const auto *SI = dyn_cast<StoreInst>(&I);
+    if (!SI) {
+      // We will look through cast uses, so ignore them completely.
+      if (I.isCast())
+        continue;
+      // Ignore debug info intrinsics, they don't escape or store to allocas.
+      if (isa<DbgInfoIntrinsic>(I))
+        continue;
+      // This is an unknown instruction. Assume it escapes or writes to all
+      // static alloca operands.
+      for (const Use &U : I.operands()) {
+        if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
+          *Info = StaticAllocaInfo::Clobbered;
+      }
+      continue;
+    }
+
+    // If the stored value is a static alloca, mark it as escaped.
+    if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
+      *Info = StaticAllocaInfo::Clobbered;
+
+    // Check if the destination is a static alloca.
+    const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
+    StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
+    if (!Info)
+      continue;
+    const AllocaInst *AI = cast<AllocaInst>(Dst);
+
+    // Skip allocas that have been initialized or clobbered.
+    if (*Info != StaticAllocaInfo::Unknown)
+      continue;
+
+    // Check if the stored value is an argument, and that this store fully
+    // initializes the alloca. Don't elide copies from the same argument twice.
+    const Value *Val = SI->getValueOperand()->stripPointerCasts();
+    const auto *Arg = dyn_cast<Argument>(Val);
+    if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
+        Arg->getType()->isEmptyTy() ||
+        DL.getTypeStoreSize(Arg->getType()) !=
+            DL.getTypeAllocSize(AI->getAllocatedType()) ||
+        ArgCopyElisionCandidates.count(Arg)) {
+      *Info = StaticAllocaInfo::Clobbered;
+      continue;
+    }
+
+    DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI << '\n');
+
+    // Mark this alloca and store for argument copy elision.
+    *Info = StaticAllocaInfo::Elidable;
+    ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
+
+    // Stop scanning if we've seen all arguments. This will happen early in -O0
+    // builds, which is useful, because -O0 builds have large entry blocks and
+    // many allocas.
+    if (ArgCopyElisionCandidates.size() == NumArgs)
+      break;
+  }
+}
+
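
For illustration (an editorial aside, not part of the patch): a minimal IR
sketch of how this scan classifies entry-block allocas. The function and
callee names here are hypothetical.

    declare void @escape(i32*)

    define void @classify(i32 %x, i32 %y) {
    entry:
      %x.addr = alloca i32
      %y.addr = alloca i32
      ; Full-size store of an argument: %x.addr becomes Elidable and the
      ; (%x, %x.addr, store) triple is recorded as a candidate.
      store i32 %x, i32* %x.addr
      ; A call is an unanalyzed use, so %y.addr is marked Clobbered...
      call void @escape(i32* %y.addr)
      ; ...and this later store of %y can no longer be treated as the
      ; initializing copy.
      store i32 %y, i32* %y.addr
      ret void
    }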
+/// Elides the copy of Arg into the static alloca AI. Replaces all uses of the
+/// original stack object created for AI with FixedIndex. Suppresses the code
+/// generation of the store SI. Returns true if there are any remaining uses of
+/// Arg.
+static bool elideArgumentCopy(FunctionLoweringInfo *FuncInfo,
+                              DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
+                              SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
+                              const Argument *Arg, const AllocaInst *AI,
+                              int FixedIndex, const StoreInst *SI) {
+  // Delete the old stack object and replace it with the fixed stack object.
+  int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
+  int OldIndex = AllocaIndex;
+  FuncInfo->MF->getFrameInfo().RemoveStackObject(OldIndex);
+  assert(!FuncInfo->MF->getFrameInfo().isImmutableObjectIndex(FixedIndex) &&
+         "cannot copy elide from immutable fixed stack objects");
+  AllocaIndex = FixedIndex;
+
+  DEBUG({
+    dbgs() << "Eliding argument copy from " << *Arg << " to " << *AI << '\n'
+           << "  Replacing frame index " << OldIndex << " with " << FixedIndex
+           << '\n';
+  });
+
+  ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
+
+  // Avoid emitting code for the store implementing the copy.
+  ElidedArgCopyInstrs.insert(SI);
+
+  for (const Value *U : Arg->users()) {
+    if (U != SI)
+      return true;
+  }
+  return false;
+}
+
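
Another aside before the LowerArguments changes below: the candidate flag only
asks the target for the argument's memory location. Elision fires when the
target hands back a bare frame index; if the argument arrives in a register
instead, no frame index comes back and the copying store is emitted as before.
A hedged sketch (hypothetical names; the outcome depends on the calling
convention):

    declare void @take_ptr(i32*)

    define void @candidate(i32 %x) {
    entry:
      %x.addr = alloca i32
      store i32 %x, i32* %x.addr        ; the candidate copy
      call void @take_ptr(i32* %x.addr)
      ret void
    }

    ; On i686, %x already lives in the caller's stack, so %x.addr is rewritten
    ; to that fixed slot and the store disappears. On x86-64, %x arrives in a
    ; register (e.g. %edi for SysV), so the store survives unchanged.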
 void SelectionDAGISel::LowerArguments(const Function &F) {
   SelectionDAG &DAG = SDB->DAG;
   SDLoc dl = SDB->getCurSDLoc();
@@ -8072,6 +8203,12 @@
     Ins.push_back(RetArg);
   }
 
+  // Look for stores of arguments to static allocas. Mark such arguments with a
+  // flag to ask the target to give us the memory location of that argument if
+  // available.
+  ArgCopyElisionMapTy ArgCopyElisionCandidates;
+  findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);
+
   // Set up the incoming argument description vector.
   unsigned Idx = 0;
   for (const Argument &Arg : F.args()) {
@@ -8149,6 +8286,8 @@
       if (NeedsRegBlock)
         Flags.setInConsecutiveRegs();
       Flags.setOrigAlign(OriginalAlignment);
+      if (ArgCopyElisionCandidates.count(&Arg))
+        Flags.setCopyElisionCandidate();
 
       MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
       unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
@@ -8221,19 +8360,42 @@
     ++i;
   }
 
+  SmallVector<SDValue, 4> Chains;
+  DenseMap<int, int> ArgCopyElisionFrameIndexMap;
   for (const Argument &Arg : F.args()) {
     ++Idx;
     SmallVector<SDValue, 4> ArgValues;
     SmallVector<EVT, 4> ValueVTs;
     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
     unsigned NumValues = ValueVTs.size();
+    bool ArgHasUses = !Arg.use_empty();
+
+    // Elide the copying store if the target was able to give us a memory
+    // location for this argument.
+    bool ElidedArgCopy = false;
+    int ArgFrameIndex = INT_MAX;
+    if (Ins[i].Flags.isCopyElisionCandidate()) {
+      auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
+      assert(ArgCopyIter != ArgCopyElisionCandidates.end());
+      FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(InVals[i].getNode());
+      if (FI) {
+        ElidedArgCopy = true;
+        ArgFrameIndex = FI->getIndex();
+        ArgHasUses = elideArgumentCopy(FuncInfo, ArgCopyElisionFrameIndexMap,
+                                       ElidedArgCopyInstrs, &Arg,
+                                       ArgCopyIter->second.first, ArgFrameIndex,
+                                       ArgCopyIter->second.second);
+      } else {
+        ArgCopyElisionCandidates.erase(ArgCopyIter);
+      }
+    }
 
     // If this argument is unused then remember its value. It is used to generate
     // debugging information.
     bool isSwiftErrorArg =
         TLI->supportSwiftError() &&
         F.getAttributes().hasAttribute(Idx, Attribute::SwiftError);
-    if (Arg.use_empty() && NumValues && !isSwiftErrorArg) {
+    if (!ArgHasUses && NumValues && !isSwiftErrorArg) {
       SDB->setUnusedArgValue(&Arg, InVals[i]);
 
       // Also remember any frame index for use in FastISel.
@@ -8250,16 +8412,23 @@
     // Even an apparent 'unused' swifterror argument needs to be returned. So
     // we do generate a copy for it that can be used on return from the
     // function.
-    if (!Arg.use_empty() || isSwiftErrorArg) {
+    if (ArgHasUses || isSwiftErrorArg) {
       Optional<ISD::NodeType> AssertOp;
       if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
         AssertOp = ISD::AssertSext;
       else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
         AssertOp = ISD::AssertZext;
-      ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
-                                           NumParts, PartVT, VT,
-                                           nullptr, AssertOp));
+      if (ElidedArgCopy) {
+        int FI = cast<FrameIndexSDNode>(InVals[i].getNode())->getIndex();
+        ArgValues.push_back(
+            DAG.getLoad(VT, dl, NewRoot, InVals[i],
+                        MachinePointerInfo::getFixedStack(*MF, FI, 0)));
+        Chains.push_back(ArgValues.back().getValue(1));
+      } else {
+        ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
+                                             PartVT, VT, nullptr, AssertOp));
+      }
     }
 
     i += NumParts;
@@ -8313,8 +8482,24 @@
     }
   }
 
+  if (!Chains.empty())
+    NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
+
+  DAG.setRoot(NewRoot);
+
   assert(i == InVals.size() && "Argument register count mismatch!");
 
+  // If any argument copy elisions occurred and we have debug info, update the
+  // stale frame indices used in the dbg.declare variable info table.
+  MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo =
+      MF->getVariableDbgInfo();
+  if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
+    for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
+      auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
+      if (I != ArgCopyElisionFrameIndexMap.end())
+        VI.Slot = I->second;
+    }
+  }
+
   // Finally, if the target has anything special to do, allow it to do so.
   EmitFunctionEntryCode();
 }
Index: lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -713,8 +713,10 @@
                                         bool &HadTailCall) {
   // Lower the instructions. If a call is emitted as a tail call, cease emitting
   // nodes for this block.
-  for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I)
-    SDB->visit(*I);
+  for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
+    if (!ElidedArgCopyInstrs.count(&*I))
+      SDB->visit(*I);
+  }
 
   // Make sure the root of the DAG is up-to-date.
   CurDAG->setRoot(SDB->getControlRoot());
@@ -1558,7 +1560,8 @@
       const Instruction *Inst = &*std::prev(BI);
 
       // If we no longer require this instruction, skip it.
-      if (isFoldedOrDeadInstruction(Inst, FuncInfo)) {
+      if (isFoldedOrDeadInstruction(Inst, FuncInfo) ||
+          ElidedArgCopyInstrs.count(Inst)) {
         --NumFastIselRemaining;
         continue;
       }
@@ -1682,6 +1685,7 @@
 
     FinishBasicBlock();
     FuncInfo->PHINodesToUpdate.clear();
+    ElidedArgCopyInstrs.clear();
   }
 
   propagateSwiftErrorVRegs(FuncInfo);
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -2712,6 +2712,14 @@
       MFI.setObjectOffset(FI, Offset);
     }
     return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+  } else if (Flags.isCopyElisionCandidate()) {
+    // This is a copy elision candidate that happened to be in memory. Return
+    // the frame index without forming the load to indicate that copy elision
+    // succeeded. Create a *mutable* stack object, since user code may now
+    // modify the argument memory.
+    int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
+                                   VA.getLocMemOffset(), /*Immutable=*/false);
+    return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
   } else {
     int FI = MFI.CreateFixedObject(ValVT.getSizeInBits()/8,
                                    VA.getLocMemOffset(), isImmutable);
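
To see why the X86 change above creates a *mutable* fixed object, consider
this sketch (an aside, assuming i686-style stack argument passing; names are
illustrative). Once %x.addr is rewritten to the argument's fixed stack slot,
ordinary user stores write the caller-allocated argument memory in place, so
the slot can no longer be modeled as immutable:

    define i32 @overwrite(i32 %x) {
    entry:
      %x.addr = alloca i32
      store i32 %x, i32* %x.addr  ; elided: %x.addr becomes the argument slot
      store i32 42, i32* %x.addr  ; now overwrites the incoming value of %x
      %v = load i32, i32* %x.addr ; must reload; folding to %x would be wrong
      ret i32 %v
    }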
Index: test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
===================================================================
--- test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
+++ test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
@@ -8,9 +8,10 @@
 @.str = internal constant [4 x i8] c"%p\0A\00" ; <[4 x i8]*> [#uses=1]
 @llvm.used = appending global [1 x i8*] [i8* bitcast (i8* (%struct.S*, i32, %struct.S*)* @_Z4test1SiS_ to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
 
-; Verify that %esi gets spilled before the call.
+; Verify that %s1 gets spilled before the call.
 ; CHECK: Z4test1SiS
-; CHECK: movl %esi,{{.*}}(%ebp)
+; CHECK: leal 8(%ebp), %[[reg:[^ ]*]]
+; CHECK: movl %[[reg]],{{.*}}(%ebp) ## 4-byte Spill
 ; CHECK: calll __Z6throwsv
 
 define i8* @_Z4test1SiS_(%struct.S* byval %s1, i32 %n, %struct.S* byval %s2) ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
Index: test/CodeGen/X86/arg-copy-elide.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/arg-copy-elide.ll
@@ -0,0 +1,188 @@
+; RUN: llc -mtriple=i686-windows < %s | FileCheck %s
+
+declare void @addrof_i32(i32*)
+declare void @addrof_i64(i64*)
+declare void @addrof_i128(i128*)
+declare void @addrof_i32_x3(i32*, i32*, i32*)
+
+define void @simple(i32 %x) {
+entry:
+  %x.addr = alloca i32
+  store i32 %x, i32* %x.addr
+  call void @addrof_i32(i32* %x.addr)
+  ret void
+}
+
+; CHECK-LABEL: _simple:
+; CHECK: leal 4(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: retl
+
+
+; We need to load %x before calling addrof_i32 now because it could mutate %x in
+; place.
+
+define i32 @use_arg(i32 %x) {
+entry:
+  %x.addr = alloca i32
+  store i32 %x, i32* %x.addr
+  call void @addrof_i32(i32* %x.addr)
+  ret i32 %x
+}
+
+; CHECK-LABEL: _use_arg:
+; CHECK: pushl %[[csr:[^ ]*]]
+; CHECK-DAG: movl 8(%esp), %[[csr]]
+; CHECK-DAG: leal 8(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: movl %[[csr]], %eax
+; CHECK: popl %[[csr]]
+; CHECK: retl
+
+
+define i64 @split_i64(i64 %x) {
+entry:
+  %x.addr = alloca i64
+  store i64 %x, i64* %x.addr
+  call void @addrof_i64(i64* %x.addr)
+  ret i64 %x
+}
+
+; CHECK-LABEL: _split_i64:
+; CHECK: pushl %ebp
+; CHECK: movl %esp, %ebp
+; CHECK: pushl %[[csr2:[^ ]*]]
+; CHECK: pushl %[[csr1:[^ ]*]]
+; CHECK: andl $-8, %esp
+; CHECK-DAG: movl 8(%ebp), %[[csr1]]
+; CHECK-DAG: movl 12(%ebp), %[[csr2]]
+; CHECK-DAG: leal 8(%ebp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i64
+; CHECK-DAG: movl %[[csr1]], %eax
+; CHECK-DAG: movl %[[csr2]], %edx
+; CHECK: leal -8(%ebp), %esp
+; CHECK: popl %[[csr1]]
+; CHECK: popl %[[csr2]]
+; CHECK: popl %ebp
+; CHECK: retl
+
+
+; The code we generate for this is unimportant. This is mostly a crash test.
+
+define void @split_i128(i128* %sret, i128 %x) {
+entry:
+  %x.addr = alloca i128
+  store i128 %x, i128* %x.addr
+  call void @addrof_i128(i128* %x.addr)
+  store i128 %x, i128* %sret
+  ret void
+}
+
+; CHECK-LABEL: _split_i128:
+; CHECK: pushl %ebp
+; CHECK: calll _addrof_i128
+; CHECK: retl
+
+
+; Check that we load all of x, y, and z before the call.
+
+define i32 @three_args(i32 %x, i32 %y, i32 %z) {
+entry:
+  %z.addr = alloca i32, align 4
+  %y.addr = alloca i32, align 4
+  %x.addr = alloca i32, align 4
+  store i32 %z, i32* %z.addr, align 4
+  store i32 %y, i32* %y.addr, align 4
+  store i32 %x, i32* %x.addr, align 4
+  call void @addrof_i32_x3(i32* %x.addr, i32* %y.addr, i32* %z.addr)
+  %s1 = add i32 %x, %y
+  %sum = add i32 %s1, %z
+  ret i32 %sum
+}
+
+; CHECK-LABEL: _three_args:
+; CHECK: pushl %[[csr:[^ ]*]]
+; CHECK-DAG: movl {{[0-9]+}}(%esp), %[[csr]]
+; CHECK-DAG: addl {{[0-9]+}}(%esp), %[[csr]]
+; CHECK-DAG: addl {{[0-9]+}}(%esp), %[[csr]]
+; CHECK-DAG: leal 8(%esp), %[[x:[^ ]*]]
+; CHECK-DAG: leal 12(%esp), %[[y:[^ ]*]]
+; CHECK-DAG: leal 16(%esp), %[[z:[^ ]*]]
+; CHECK: pushl %[[z]]
+; CHECK: pushl %[[y]]
+; CHECK: pushl %[[x]]
+; CHECK: calll _addrof_i32_x3
+; CHECK: movl %[[csr]], %eax
+; CHECK: popl %[[csr]]
+; CHECK: retl
+
+
+define void @two_args_same_alloca(i32 %x, i32 %y) {
+entry:
+  %x.addr = alloca i32
+  store i32 %x, i32* %x.addr
+  store i32 %y, i32* %x.addr
+  call void @addrof_i32(i32* %x.addr)
+  ret void
+}
+
+; CHECK-LABEL: _two_args_same_alloca:
+; CHECK: movl 8(%esp), {{.*}}
+; CHECK: movl {{.*}}, 4(%esp)
+; CHECK: leal 4(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: retl
+
+
+define void @avoid_byval(i32* byval %x) {
+entry:
+  %x.p.p = alloca i32*
+  store i32* %x, i32** %x.p.p
+  call void @addrof_i32(i32* %x)
+  ret void
+}
+
+; CHECK-LABEL: _avoid_byval:
+; CHECK: leal {{[0-9]+}}(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: retl
+
+
+define void @avoid_inalloca(i32* inalloca %x) {
+entry:
+  %x.p.p = alloca i32*
+  store i32* %x, i32** %x.p.p
+  call void @addrof_i32(i32* %x)
+  ret void
+}
+
+; CHECK-LABEL: _avoid_inalloca:
+; CHECK: leal {{[0-9]+}}(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: retl
+
+
+; Don't elide the copy when the alloca is escaped with a store.
+
+define void @escape_with_store(i32 %x) {
+  %x1 = alloca i32
+  %x2 = alloca i32*
+  store i32* %x1, i32** %x2
+  %x3 = load i32*, i32** %x2
+  store i32 0, i32* %x3
+  store i32 %x, i32* %x1
+  call void @addrof_i32(i32* %x1)
+  ret void
+}
+
+; CHECK: _escape_with_store:
+; CHECK-DAG: movl {{.*}}(%esp), %[[reg:[^ ]*]]
+; CHECK-DAG: movl $0, [[offs:[0-9]*]](%esp)
+; CHECK: movl %[[reg]], [[offs]](%esp)
+; CHECK: calll _addrof_i32
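
One limitation worth noting here (an editorial aside; the FIXME in
findArgumentCopyElisionCandidates covers it): the scan requires a single store
that fully initializes the alloca, so field-by-field initialization of an
aggregate is never elided. A hypothetical case, not among the tests in this
patch:

    define void @no_elide_struct(i64 %x) {
    entry:
      %pair = alloca { i32, i32 }
      ; In this version of the scan, the GEPs below are non-store, non-cast
      ; uses, so %pair is conservatively marked clobbered; the partial stores
      ; would not qualify anyway, since neither one stores the argument itself
      ; or covers the whole alloca.
      %lo.p = getelementptr { i32, i32 }, { i32, i32 }* %pair, i32 0, i32 0
      %hi.p = getelementptr { i32, i32 }, { i32, i32 }* %pair, i32 0, i32 1
      %lo = trunc i64 %x to i32
      %x.hi = lshr i64 %x, 32
      %hi = trunc i64 %x.hi to i32
      store i32 %lo, i32* %lo.p
      store i32 %hi, i32* %hi.p
      ret void
    }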
Index: test/CodeGen/X86/inline-asm-tied.ll
===================================================================
--- test/CodeGen/X86/inline-asm-tied.ll
+++ test/CodeGen/X86/inline-asm-tied.ll
@@ -1,31 +1,27 @@
 ; RUN: llc < %s -mtriple=i386-apple-darwin9 -O0 -optimize-regalloc -regalloc=basic -no-integrated-as | FileCheck %s
 ; rdar://6992609
 
-; CHECK: movl %ecx, 4([[ESP:%e..]])
-; CHECK: movl 4([[ESP]]), [[EDX:%e..]]
-; CHECK: movl [[EDX]], 4([[ESP]])
 target triple = "i386-apple-darwin9.0"
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i64 (i64)* @_OSSwapInt64 to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
 
 define i64 @_OSSwapInt64(i64 %_data) nounwind {
 entry:
-  %retval = alloca i64 ; <i64*> [#uses=2]
-  %_data.addr = alloca i64 ; <i64*> [#uses=4]
-  store i64 %_data, i64* %_data.addr
-  %tmp = load i64, i64* %_data.addr ; <i64> [#uses=1]
-  %0 = call i64 asm "bswap %eax\0A\09bswap %edx\0A\09xchgl %eax, %edx", "=A,0,~{dirflag},~{fpsr},~{flags}"(i64 %tmp) nounwind ; <i64> [#uses=1]
-  store i64 %0, i64* %_data.addr
-  %tmp1 = load i64, i64* %_data.addr ; <i64> [#uses=1]
-  store i64 %tmp1, i64* %retval
-  %1 = load i64, i64* %retval ; <i64> [#uses=1]
-  ret i64 %1
+  %0 = call i64 asm "bswap %eax\0A\09bswap %edx\0A\09xchgl %eax, %%edx", "=A,0,~{dirflag},~{fpsr},~{flags}"(i64 %_data) nounwind
+  ret i64 %0
 }
 
+; CHECK-LABEL: __OSSwapInt64:
+; CHECK-DAG: movl 8(%esp), %edx
+; CHECK-DAG: movl 4(%esp), %eax
+; CHECK: ## InlineAsm Start
+; CHECK: ## InlineAsm End
+; Everything is set up in EAX:EDX, return immediately.
+; CHECK-NEXT: retl
+
 ; The tied operands are not necessarily in the same order as the defs.
 ; PR13742
 define i64 @swapped(i64 %x, i64 %y) nounwind {
 entry:
-  %x0 = call { i64, i64 } asm "foo", "=r,=r,1,0,~{dirflag},~{fpsr},~{flags}"(i64 %x, i64 %y) nounwind
-  %x1 = extractvalue { i64, i64 } %x0, 0
-  ret i64 %x1
+  %x0 = call { i64, i64 } asm "foo", "=r,=r,1,0,~{dirflag},~{fpsr},~{flags}"(i64 %x, i64 %y) nounwind
+  %x1 = extractvalue { i64, i64 } %x0, 0
+  ret i64 %x1
 }
Index: test/CodeGen/X86/pr30430.ll
===================================================================
--- test/CodeGen/X86/pr30430.ll
+++ test/CodeGen/X86/pr30430.ll
@@ -14,14 +14,6 @@
 ; CHECK-NEXT:    .cfi_def_cfa_register %rbp
 ; CHECK-NEXT:    andq $-64, %rsp
 ; CHECK-NEXT:    subq $256, %rsp # imm = 0x100
-; CHECK-NEXT:    vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vmovss {{.*#+}} xmm9 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vmovss {{.*#+}} xmm10 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vmovss {{.*#+}} xmm11 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vmovss {{.*#+}} xmm12 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vmovss {{.*#+}} xmm13 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vmovss {{.*#+}} xmm14 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vmovss {{.*#+}} xmm15 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vmovss %xmm0, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    vmovss %xmm1, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    vmovss %xmm2, {{[0-9]+}}(%rsp)
@@ -30,14 +22,6 @@
 ; CHECK-NEXT:    vmovss %xmm5, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    vmovss %xmm6, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    vmovss %xmm7, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovss %xmm15, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovss %xmm14, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovss %xmm13, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovss %xmm12, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovss %xmm11, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovss %xmm10, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovss %xmm9, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovss %xmm8, (%rsp)
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
Index: test/CodeGen/X86/sse-intrinsics-fast-isel.ll
===================================================================
--- test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -1653,12 +1653,8 @@
 define void @test_mm_setcsr(i32 %a0) nounwind {
 ; X32-LABEL: test_mm_setcsr:
 ; X32:       # BB#0:
-; X32-NEXT:    pushl %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl %esp, %ecx
-; X32-NEXT:    movl %eax, (%esp)
-; X32-NEXT:    ldmxcsr (%ecx)
-; X32-NEXT:    popl %eax
+; X32-NEXT:    leal 4(%esp), %eax
+; X32-NEXT:    ldmxcsr (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setcsr:
Index: test/CodeGen/X86/win64_eh.ll
===================================================================
--- test/CodeGen/X86/win64_eh.ll
+++ test/CodeGen/X86/win64_eh.ll
@@ -82,11 +82,11 @@
 }
 ; WIN64-LABEL: foo3:
 ; WIN64: .seh_proc foo3
-; NORM:  subq $24, %rsp
-; ATOM:  leaq -24(%rsp), %rsp
-; WIN64: .seh_stackalloc 24
+; NORM:  subq $16, %rsp
+; ATOM:  leaq -16(%rsp), %rsp
+; WIN64: .seh_stackalloc 16
 ; WIN64: .seh_endprologue
-; WIN64: addq $24, %rsp
+; WIN64: addq $16, %rsp
 ; WIN64: ret
 ; WIN64: .seh_endproc
Index: test/DebugInfo/X86/discriminator.ll
===================================================================
--- test/DebugInfo/X86/discriminator.ll
+++ test/DebugInfo/X86/discriminator.ll
@@ -59,4 +59,4 @@
 
 ; CHECK: Address            Line   Column File   ISA Discriminator Flags
 ; CHECK: ------------------ ------ ------ ------ --- ------------- -------------
-; CHECK: 0x0000000000000011      2      0      1   0            42  {{$}}
+; CHECK: 0x000000000000000a      2      0      1   0            42  {{$}}