Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -52,10 +52,6 @@
 namespace {
   static cl::opt<bool>
-    CombinerAA("combiner-alias-analysis", cl::Hidden,
-               cl::desc("Enable DAG combiner alias-analysis heuristics"));
-
-  static cl::opt<bool>
     CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
                      cl::desc("Enable DAG combiner's use of IR alias analysis"));

@@ -406,15 +402,12 @@
     /// Holds a pointer to an LSBaseSDNode as well as information on where it
     /// is located in a sequence of memory operations connected by a chain.
     struct MemOpLink {
-      MemOpLink (LSBaseSDNode *N, int64_t Offset, unsigned Seq):
-        MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) { }
+      MemOpLink(LSBaseSDNode *N, int64_t Offset)
+          : MemNode(N), OffsetFromBase(Offset) {}
       // Ptr to the mem node.
       LSBaseSDNode *MemNode;
       // Offset from the base ptr.
       int64_t OffsetFromBase;
-      // What is the sequence number of this mem node.
-      // Lowest mem operand in the DAG starts at zero.
-      unsigned SequenceNum;
     };

     /// This is a helper function for visitMUL to check the profitability
@@ -429,7 +422,6 @@
     /// constant build_vector of the stored constant values in Stores.
     SDValue getMergedConstantVectorStore(SelectionDAG &DAG, const SDLoc &SL,
                                          ArrayRef<MemOpLink> Stores,
-                                         SmallVectorImpl<SDValue> &Chains,
                                          EVT Ty) const;

     /// This is a helper function for visitAND and visitZERO_EXTEND.  Returns
@@ -451,10 +443,8 @@
     /// This is a helper function for MergeConsecutiveStores.
     /// Stores that may be merged are placed in StoreNodes.
-    /// Loads that may alias with those stores are placed in AliasLoadNodes.
-    void getStoreMergeAndAliasCandidates(
-        StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
-        SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes);
+    void getStoreMergeCandidates(StoreSDNode *St,
+                                 SmallVectorImpl<MemOpLink> &StoreNodes);

     /// Helper function for MergeConsecutiveStores. Checks if
     /// Candidate stores have indirect dependency through their
@@ -1604,11 +1594,9 @@
     Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
   }

-  // Add users to worklist if AA is enabled, since it may introduce
-  // a lot of new chained token factors while removing memory deps.
-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-  return CombineTo(N, Result, UseAA /*add to worklist*/);
+  // Add users to worklist, since we may introduce a lot of new
+  // chained token factors while removing memory deps.
+  return CombineTo(N, Result, true /*add to worklist*/);
 }

 return Result;
@@ -10136,11 +10124,22 @@
   // TODO: Handle store large -> read small portion.
   // TODO: Handle TRUNCSTORE/LOADEXT
   if (ISD::isNormalLoad(N) && !LD->isVolatile()) {
-    if (ISD::isNON_TRUNCStore(Chain.getNode())) {
+    // Either a direct store, or a store off of a TokenFactor can be
+    // forwarded.
+    if (Chain->getOpcode() == ISD::TokenFactor) {
+      for (const SDValue &ChainOp : Chain->op_values()) {
+        if (ISD::isNON_TRUNCStore(ChainOp.getNode())) {
+          StoreSDNode *PrevST = cast<StoreSDNode>(ChainOp);
+          if (PrevST->getBasePtr() == Ptr &&
+              PrevST->getValue().getValueType() == N->getValueType(0))
+            return CombineTo(N, PrevST->getOperand(1), Chain);
+        }
+      }
+    } else if (ISD::isNON_TRUNCStore(Chain.getNode())) {
       StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
       if (PrevST->getBasePtr() == Ptr &&
           PrevST->getValue().getValueType() == N->getValueType(0))
-        return CombineTo(N, Chain.getOperand(1), Chain);
+        return CombineTo(N, PrevST->getOperand(1), Chain);
     }
   }
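The TokenFactor case in the hunk above matters because, with stores no longer funneled onto one long chain, a load's chain operand is frequently a TokenFactor over several independent stores, so handling only the direct-store case would miss most forwarding opportunities. Below is a minimal toy model of the check, not LLVM's SDNode API; Node and forwardedValue are invented names, and the real code additionally requires the stored and loaded value types to match:

    #include <cassert>
    #include <vector>

    // Toy stand-ins; real LLVM uses SDNode/SDValue and ISD opcodes.
    struct Node {
      enum Kind { Store, Load, TokenFactor, Other } K;
      std::vector<Node *> Ops; // for a Store: {chain, value, pointer}
      Node *Ptr = nullptr;     // base pointer of a memory operation
    };

    // A load can take its value from a prior store to the same address,
    // whether that store is the load's chain itself or one operand of a
    // TokenFactor chain.
    Node *forwardedValue(const Node *Ld) {
      Node *Chain = Ld->Ops[0];
      auto FromStore = [&](Node *N) -> Node * {
        return (N->K == Node::Store && N->Ptr == Ld->Ptr) ? N->Ops[1] : nullptr;
      };
      if (Chain->K == Node::TokenFactor)
        for (Node *Op : Chain->Ops)
          if (Node *V = FromStore(Op))
            return V;
      return FromStore(Chain);
    }

    int main() {
      Node P{Node::Other, {}, nullptr};   // the address
      Node V{Node::Other, {}, nullptr};   // the stored value
      Node Entry{Node::Other, {}, nullptr};
      Node St{Node::Store, {&Entry, &V, &P}, &P};
      Node TF{Node::TokenFactor, {&St}, nullptr};
      Node Ld{Node::Load, {&TF}, &P};
      assert(forwardedValue(&Ld) == &V); // forwarded through the TokenFactor
    }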
@@ -10158,14 +10157,7 @@
     }
   }

-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-#ifndef NDEBUG
-  if (CombinerAAOnlyFunc.getNumOccurrences() &&
-      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
-    UseAA = false;
-#endif
-  if (UseAA && LD->isUnindexed()) {
+  if (LD->isUnindexed()) {
     // Walk up chain skipping non-aliasing memory nodes.
     SDValue BetterChain = FindBetterChain(N, Chain);
@@ -11244,14 +11236,14 @@
   return false;
 }

-SDValue DAGCombiner::getMergedConstantVectorStore(
-    SelectionDAG &DAG, const SDLoc &SL, ArrayRef<MemOpLink> Stores,
-    SmallVectorImpl<SDValue> &Chains, EVT Ty) const {
+SDValue DAGCombiner::getMergedConstantVectorStore(SelectionDAG &DAG,
+                                                  const SDLoc &SL,
+                                                  ArrayRef<MemOpLink> Stores,
+                                                  EVT Ty) const {
   SmallVector<SDValue, 8> BuildVector;

   for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I) {
     StoreSDNode *St = cast<StoreSDNode>(Stores[I].MemNode);
-    Chains.push_back(St->getChain());
     BuildVector.push_back(St->getValue());
   }
@@ -11267,21 +11259,10 @@
   int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
   LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;

-  unsigned LatestNodeUsed = 0;
-
-  for (unsigned i=0; i < NumStores; ++i) {
-    // Find a chain for the new wide-store operand. Notice that some
-    // of the store nodes that we found may not be selected for inclusion
-    // in the wide store. The chain we use needs to be the chain of the
-    // latest store node which is *used* and replaced by the wide store.
-    if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
-      LatestNodeUsed = i;
-  }

   SmallVector<SDValue, 8> Chains;

   // The latest Node in the DAG.
-  LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;
   SDLoc DL(StoreNodes[0].MemNode);

   SDValue StoredVal;
@@ -11297,7 +11278,7 @@
     assert(TLI.isTypeLegal(Ty) && "Illegal vector store");

     if (IsConstantSrc) {
-      StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Chains, Ty);
+      StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Ty);
     } else {
       SmallVector<SDValue, 8> Ops;
       for (unsigned i = 0; i < NumStores; ++i) {
@@ -11307,7 +11288,6 @@
         if (Val.getValueType() != MemVT)
           return false;
         Ops.push_back(Val);
-        Chains.push_back(St->getChain());
       }

       // Build the extracted vector elements back into a vector.
@@ -11327,7 +11307,6 @@
     for (unsigned i = 0; i < NumStores; ++i) {
       unsigned Idx = IsLE ? (NumStores - 1 - i) : i;
       StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
-      Chains.push_back(St->getChain());

       SDValue Val = St->getValue();
       StoreInt <<= ElementSizeBytes * 8;
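For the integer path above, each element's constant is shifted into one wide value; the index order flips with endianness so the lowest-addressed store always lands in the lowest-addressed bytes of the merged store. A self-contained sketch of that packing (plain uint64_t in place of APInt, one-byte elements assumed; packConstants is an invented name):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Pack per-element constants into one wide integer the way the combiner
    // builds StoreInt, shifting previously packed bytes left each step.
    uint64_t packConstants(const std::vector<uint8_t> &Elts,
                           bool IsLittleEndian) {
      uint64_t StoreInt = 0;
      size_t N = Elts.size();
      for (size_t i = 0; i < N; ++i) {
        size_t Idx = IsLittleEndian ? (N - 1 - i) : i;
        StoreInt <<= 8; // ElementSizeBytes * 8 with one-byte elements
        StoreInt |= Elts[Idx];
      }
      return StoreInt;
    }

    int main() {
      // Four byte-stores of 0x11,0x22,0x33,0x44 to increasing addresses merge
      // into one 32-bit store; on little-endian the lowest address is the LSB.
      std::vector<uint8_t> Elts = {0x11, 0x22, 0x33, 0x44};
      assert(packConstants(Elts, /*IsLittleEndian=*/true) == 0x44332211u);
      assert(packConstants(Elts, /*IsLittleEndian=*/false) == 0x11223344u);
      std::puts("ok");
    }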
@@ -11345,7 +11324,9 @@
     StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
   }

-  assert(!Chains.empty());
+  // Gather all Chains we're inheriting.
+  for (unsigned i = 0; i < NumStores; ++i)
+    Chains.push_back(StoreNodes[i].MemNode->getChain());

   SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
   SDValue NewStore = DAG.getStore(NewChain, DL, StoredVal,
@@ -11353,45 +11334,19 @@
                                   FirstInChain->getPointerInfo(),
                                   FirstInChain->getAlignment());

-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-  if (UseAA) {
-    // Replace all merged stores with the new store.
-    for (unsigned i = 0; i < NumStores; ++i)
-      CombineTo(StoreNodes[i].MemNode, NewStore);
-  } else {
-    // Replace the last store with the new store.
-    CombineTo(LatestOp, NewStore);
-    // Erase all other stores.
-    for (unsigned i = 0; i < NumStores; ++i) {
-      if (StoreNodes[i].MemNode == LatestOp)
-        continue;
-      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
-      // ReplaceAllUsesWith will replace all uses that existed when it was
-      // called, but graph optimizations may cause new ones to appear. For
-      // example, the case in pr14333 looks like
-      //
-      //  St's chain -> St -> another store -> X
-      //
-      // And the only difference from St to the other store is the chain.
-      // When we change it's chain to be St's chain they become identical,
-      // get CSEed and the net result is that X is now a use of St.
-      // Since we know that St is redundant, just iterate.
-      while (!St->use_empty())
-        DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain());
-      deleteAndRecombine(St);
-    }
-  }
+  // Replace all merged stores with the new store.
+  for (unsigned i = 0; i < NumStores; ++i)
+    CombineTo(StoreNodes[i].MemNode, NewStore);

   return true;
 }

-void DAGCombiner::getStoreMergeAndAliasCandidates(
-    StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
-    SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) {
+void DAGCombiner::getStoreMergeCandidates(
+    StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes) {
   // This holds the base pointer, index, and the offset in bytes from the base
   // pointer.
   BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG);
+  EVT MemVT = St->getMemoryVT();

   // We must have a base and an offset.
   if (!BasePtr.Base.getNode())
@@ -11401,104 +11356,49 @@
     return;

   // Do not handle stores to undef base pointers.
   if (BasePtr.Base.isUndef())
     return;

-  // Walk up the chain and look for nodes with offsets from the same
-  // base pointer. Stop when reaching an instruction with a different kind
-  // or instruction which has a different base pointer.
-  EVT MemVT = St->getMemoryVT();
-  unsigned Seq = 0;
-  StoreSDNode *Index = St;
-
-
-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-
-  if (UseAA) {
-    // Look at other users of the same chain. Stores on the same chain do not
-    // alias. If combiner-aa is enabled, non-aliasing stores are canonicalized
-    // to be on the same chain, so don't bother looking at adjacent chains.
-
-    SDValue Chain = St->getChain();
-    for (auto I = Chain->use_begin(), E = Chain->use_end(); I != E; ++I) {
-      if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) {
-        if (I.getOperandNo() != 0)
-          continue;
-
-        if (OtherST->isVolatile() || OtherST->isIndexed())
-          continue;
-
-        if (OtherST->getMemoryVT() != MemVT)
-          continue;
-
-        BaseIndexOffset Ptr = BaseIndexOffset::match(OtherST->getBasePtr(), DAG);
-
-        if (Ptr.equalBaseIndex(BasePtr))
-          StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset, Seq++));
-      }
-    }
-
-    return;
-  }
-
-  while (Index) {
-    // If the chain has more than one use, then we can't reorder the mem ops.
-    if (Index != St && !SDValue(Index, 0)->hasOneUse())
-      break;
-
-    // Find the base pointer and offset for this memory node.
-    BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG);
-
-    // Check that the base pointer is the same as the original one.
-    if (!Ptr.equalBaseIndex(BasePtr))
-      break;
-
-    // The memory operands must not be volatile.
-    if (Index->isVolatile() || Index->isIndexed())
-      break;
-
-    // No truncation.
-    if (Index->isTruncatingStore())
-      break;
-
-    // The stored memory type must be the same.
-    if (Index->getMemoryVT() != MemVT)
-      break;
+  // We are looking for a root node which is an ancestor to all mergeable
+  // stores. We search up through a load, to our root, and then down
+  // through all children. For instance we will find Store{1,2,3} if
+  // St is Store1, Store2, or Store3 where the root is not a load,
+  // which is always true for nonvolatile ops. TODO: Expand
+  // the search to find all valid candidates through multiple layers of loads.
+  //
+  //            Root
+  //        |-------|-------|
+  //      Load    Load    Store3
+  //        |       |
+  //     Store1  Store2
+  //
+  // FIXME: We should be able to climb and
+  // descend TokenFactors to find candidates as well.

-  // We do not allow under-aligned stores in order to prevent
-  // overriding stores. NOTE: this is a bad hack. Alignment SHOULD
-  // be irrelevant here; what MATTERS is that we not move memory
-  // operations that potentially overlap past each-other.
-  if (Index->getAlignment() < MemVT.getStoreSize())
-    break;
+  SDNode *RootNode = (St->getChain()).getNode();

-  // We found a potential memory operand to merge.
-  StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++));
+  // Set of parents of candidates.
+  std::set<SDNode *> CandidateParents;

-  // Find the next memory operand in the chain. If the next operand in the
-  // chain is a store then move up and continue the scan with the next
-  // memory operand. If the next operand is a load save it and use alias
-  // information to check if it interferes with anything.
-  SDNode *NextInChain = Index->getChain().getNode();
-  while (1) {
-    if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
-      // We found a store node. Use it for the next iteration.
-      Index = STn;
-      break;
-    } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
-      if (Ldn->isVolatile()) {
-        Index = nullptr;
-        break;
+  if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(RootNode)) {
+    RootNode = Ldn->getChain().getNode();
+    for (auto I = RootNode->use_begin(), E = RootNode->use_end(); I != E; ++I)
+      if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) // walk down chain
+        CandidateParents.insert(*I);
+  } else
+    CandidateParents.insert(RootNode);
+
+  // Check all parents of mergeable children.
+  for (auto P = CandidateParents.begin(); P != CandidateParents.end(); ++P)
+    for (auto I = (*P)->use_begin(), E = (*P)->use_end(); I != E; ++I)
+      if (I.getOperandNo() == 0)
+        if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) {
+          if (OtherST->isVolatile() || OtherST->isIndexed())
+            continue;
+          if (OtherST->getMemoryVT() != MemVT)
+            continue;
+          BaseIndexOffset Ptr =
+              BaseIndexOffset::match(OtherST->getBasePtr(), DAG);
+          if (Ptr.equalBaseIndex(BasePtr))
+            StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset));
+        }
-
-        // Save the load node for later. Continue the scan.
-        AliasLoadNodes.push_back(Ldn);
-        NextInChain = Ldn->getChain().getNode();
-        continue;
-      } else {
-        Index = nullptr;
-        break;
-      }
-    }
-  }
 }
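The diagram in the comment is the whole algorithm: climb through at most one load to a root, then scan back down for stores off that root (or off the root's loads) with the same base. A toy version of the search, using an invented Node graph in place of SDNode use-lists (the volatile, indexed, and memory-type filters are reduced to a base-address check). Note that, as the patch code is written, Store3 in the diagram is found only when the search starts at Store3 itself; the TODO and FIXME above cover widening the search:

    #include <cassert>
    #include <set>
    #include <vector>

    struct Node {
      enum Kind { Store, Load, Other } K;
      Node *Chain = nullptr;          // operand 0 of a memory op
      int Base = -1;                  // toy stand-in for BaseIndexOffset
      std::vector<Node *> ChainUsers; // nodes whose chain operand is this node
    };

    void collectCandidates(Node *St, std::vector<Node *> &Out) {
      Node *Root = St->Chain;
      std::set<Node *> Parents;
      if (Root->K == Node::Load) {       // search up through one load...
        Root = Root->Chain;
        for (Node *U : Root->ChainUsers) // ...then down through its siblings
          if (U->K == Node::Load)
            Parents.insert(U);
      } else {
        Parents.insert(Root);
      }
      for (Node *P : Parents)            // stores hanging off any parent
        for (Node *U : P->ChainUsers)
          if (U->K == Node::Store && U->Base == St->Base)
            Out.push_back(U);
    }

    int main() {
      // The Root/Load/Store picture from the comment above.
      Node Root{Node::Other};
      Node Ld1{Node::Load, &Root}, Ld2{Node::Load, &Root};
      Node St1{Node::Store, &Ld1, 0}, St2{Node::Store, &Ld2, 0};
      Node St3{Node::Store, &Root, 0};
      Root.ChainUsers = {&Ld1, &Ld2, &St3};
      Ld1.ChainUsers = {&St1};
      Ld2.ChainUsers = {&St2};
      std::vector<Node *> Out;
      collectCandidates(&St1, Out);
      assert(Out.size() == 2); // finds Store1 and Store2 from Store1
    }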

// We need to check that merging these stores does not cause a loop
@@ -11518,10 +11418,9 @@
       Worklist.push_back(n->getOperand(j).getNode());
   }
   // search through DAG. We can stop early if we find a storenode
-  for (unsigned i = 0; i < StoreNodes.size(); ++i) {
+  for (unsigned i = 0; i < StoreNodes.size(); ++i)
     if (SDNode::hasPredecessorHelper(StoreNodes[i].MemNode, Visited, Worklist))
       return false;
-  }
   return true;
 }
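This dependence check guards against merge-induced cycles: the merged store inherits every candidate's chain and operands, so if one candidate is reachable through another candidate's operands, the combined node would depend on itself. A toy reachability version (Node and mergeWouldCreateCycle are invented; LLVM's hasPredecessorHelper performs the equivalent search with a shared Visited set and a depth limit):

    #include <queue>
    #include <set>
    #include <vector>

    struct Node {
      std::vector<Node *> Ops; // all operands, chain included
    };

    // Merging is unsafe if any candidate store is reachable through the
    // operands of the candidates themselves.
    bool mergeWouldCreateCycle(const std::vector<Node *> &Stores) {
      std::set<Node *> Candidates(Stores.begin(), Stores.end());
      std::set<Node *> Visited;
      std::queue<Node *> Work;
      for (Node *S : Stores)
        for (Node *Op : S->Ops)
          Work.push(Op);
      while (!Work.empty()) {
        Node *N = Work.front();
        Work.pop();
        if (!Visited.insert(N).second)
          continue;
        if (Candidates.count(N))
          return true; // one candidate feeds another: merging makes a cycle
        for (Node *Op : N->Ops)
          Work.push(Op);
      }
      return false;
    }

    int main() {
      // St1 stores a value loaded after St0: St0 <- Ld <- St1 through
      // operands, so merging St0 and St1 is unsafe.
      Node St0{};
      Node Ld{{&St0}};
      Node St1{{&Ld}};
      std::vector<Node *> Stores = {&St0, &St1};
      return mergeWouldCreateCycle(Stores) ? 0 : 1;
    }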
@@ -11559,67 +11458,36 @@
   if (MemVT.isVector() && IsLoadSrc)
     return false;

-  // Only look at ends of store sequences.
-  SDValue Chain = SDValue(St, 0);
-  if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
-    return false;
-
-  // Save the LoadSDNodes that we find in the chain.
-  // We need to make sure that these nodes do not interfere with
-  // any of the store nodes.
-  SmallVector<LoadSDNode*, 8> AliasLoadNodes;
-
-  // Save the StoreSDNodes that we find in the chain.
+  // Find potential store merge candidates by searching through the chain
+  // sub-DAG.
   SmallVector<MemOpLink, 8> StoreNodes;
-
-  getStoreMergeAndAliasCandidates(St, StoreNodes, AliasLoadNodes);
+  getStoreMergeCandidates(St, StoreNodes);

   // Check if there is anything to merge.
   if (StoreNodes.size() < 2)
     return false;

-  // only do dependence check in AA case
-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-  if (UseAA && !checkMergeStoreCandidatesForDependencies(StoreNodes))
+  // Check that we can merge these candidates without causing a cycle.
+  if (!checkMergeStoreCandidatesForDependencies(StoreNodes))
     return false;

   // Sort the memory operands according to their distance from the
-  // base pointer. As a secondary criteria: make sure stores coming
-  // later in the code come first in the list. This is important for
-  // the non-UseAA case, because we're merging stores into the FINAL
-  // store along a chain which potentially contains aliasing stores.
-  // Thus, if there are multiple stores to the same address, the last
-  // one can be considered for merging but not the others.
+  // base pointer.
   std::sort(StoreNodes.begin(), StoreNodes.end(),
             [](MemOpLink LHS, MemOpLink RHS) {
-              return LHS.OffsetFromBase < RHS.OffsetFromBase ||
-                     (LHS.OffsetFromBase == RHS.OffsetFromBase &&
-                      LHS.SequenceNum < RHS.SequenceNum);
-            });
+              return LHS.OffsetFromBase < RHS.OffsetFromBase;
+            });

   // Scan the memory operations on the chain and find the first non-consecutive
   // store memory address.
   unsigned LastConsecutiveStore = 0;
   int64_t StartAddress = StoreNodes[0].OffsetFromBase;
-  for (unsigned i = 0, e = StoreNodes.size(); i < e; ++i) {
-
-    // Check that the addresses are consecutive starting from the second
-    // element in the list of stores.
-    if (i > 0) {
-      int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
-      if (CurrAddress - StartAddress != (ElementSizeBytes * i))
-        break;
-    }

-    // Check if this store interferes with any of the loads that we found.
-    // If we find a load that aliases with this store, stop the sequence.
-    if (any_of(AliasLoadNodes, [&](LSBaseSDNode *Ldn) {
-          return isAlias(Ldn, StoreNodes[i].MemNode);
-        }))
+  // Check that the addresses are consecutive starting from the second
+  // element in the list of stores.
+  for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) {
+    int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
+    if (CurrAddress - StartAddress != (ElementSizeBytes * i))
       break;
-
-    // Mark this node as useful.
     LastConsecutiveStore = i;
   }
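The simplified scan works because candidates now arrive unordered from the use-list walk rather than in chain order: sort by offset, then keep the longest prefix whose addresses step by exactly the element size. A standalone sketch (MemOp and consecutiveRun are invented names; the real code also checks type legality and alignment before committing to a merge):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct MemOp {
      int64_t OffsetFromBase;
    };

    // Returns how many stores, starting from the lowest offset, form a
    // consecutive run of ElementSizeBytes-sized slots.
    size_t consecutiveRun(std::vector<MemOp> &Ops, int64_t ElementSizeBytes) {
      std::sort(Ops.begin(), Ops.end(), [](const MemOp &L, const MemOp &R) {
        return L.OffsetFromBase < R.OffsetFromBase;
      });
      size_t LastConsecutive = 0;
      for (size_t i = 1; i < Ops.size(); ++i) {
        if (Ops[i].OffsetFromBase - Ops[0].OffsetFromBase !=
            ElementSizeBytes * (int64_t)i)
          break;
        LastConsecutive = i;
      }
      return LastConsecutive + 1;
    }

    int main() {
      std::vector<MemOp> Ops = {{8}, {0}, {4}, {16}};
      // After sorting: 0,4,8,16. The run 0,4,8 is consecutive for 4-byte
      // elements, so three of the four stores are mergeable.
      return consecutiveRun(Ops, 4) == 3 ? 0 : 1;
    }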
@@ -11773,7 +11641,7 @@
     }

     // We found a potential memory operand to merge.
-    LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0));
+    LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset));
   }

   if (LoadNodes.size() < 2)
     return false;
@@ -11862,22 +11730,8 @@
   // Collect the chains from all merged stores.
   SmallVector<SDValue, 8> MergeStoreChains;
-  MergeStoreChains.push_back(StoreNodes[0].MemNode->getChain());
-
-  // The latest Node in the DAG.
-  unsigned LatestNodeUsed = 0;
-  for (unsigned i=1; i<NumElem; ++i) {
-    // Find a chain for the new wide-store operand. Notice that some
-    // of the store nodes that we found may not be selected for inclusion
-    // in the wide store. The chain we use needs to be the chain of the
-    // latest store node which is *used* and replaced by the wide store.
-    if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
-      LatestNodeUsed = i;
-
-    MergeStoreChains.push_back(StoreNodes[i].MemNode->getChain());
-  }
-
-  LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;

   // Find if it is better to use vectors or integers to load and store
   // to memory.
@@ -11912,23 +11766,9 @@
                             SDValue(NewLoad.getNode(), 1));
   }

-  if (UseAA) {
-    // Replace the all stores with the new store.
-    for (unsigned i = 0; i < NumElem; ++i)
-      CombineTo(StoreNodes[i].MemNode, NewStore);
-  } else {
-    // Replace the last store with the new store.
-    CombineTo(LatestOp, NewStore);
-    // Erase all other stores.
-    for (unsigned i = 0; i < NumElem; ++i) {
-      // Remove all Store nodes.
-      if (StoreNodes[i].MemNode == LatestOp)
-        continue;
-      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
-      DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
-      deleteAndRecombine(St);
-    }
-  }
+  // Replace all of the stores with the new store.
+  for (unsigned i = 0; i < NumElem; ++i)
+    CombineTo(StoreNodes[i].MemNode, NewStore);

   return true;
 }
@@ -12086,19 +11926,7 @@
   if (SDValue NewST = TransformFPLoadStorePair(N))
     return NewST;

-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-#ifndef NDEBUG
-  if (CombinerAAOnlyFunc.getNumOccurrences() &&
-      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
-    UseAA = false;
-#endif
-  if (UseAA && ST->isUnindexed()) {
-    // FIXME: We should do this even without AA enabled. AA will just allow
-    // FindBetterChain to work in more situations. The problem with this is that
-    // any combine that expects memory operations to be on consecutive chains
-    // first needs to be updated to look for users of the same chain.
-
+  if (ST->isUnindexed()) {
     // Walk up chain skipping non-aliasing memory nodes, on this store and any
     // adjacent stores.
     if (findBetterNeighborChains(ST)) {
@@ -12131,11 +11959,16 @@
   // Otherwise, see if we can simplify the operation with
   // SimplifyDemandedBits, which only works if the value has a single use.
-  if (SimplifyDemandedBits(Value,
-                           APInt::getLowBitsSet(
-                               Value.getValueType().getScalarType().getSizeInBits(),
-                               ST->getMemoryVT().getScalarType().getSizeInBits())))
+  if (SimplifyDemandedBits(
+          Value, APInt::getLowBitsSet(
+                     Value.getValueType().getScalarType().getSizeInBits(),
+                     ST->getMemoryVT().getScalarType().getSizeInBits()))) {
+    // Re-visit the store if anything changed; SimplifyDemandedBits
+    // will add Value's node back to the worklist if necessary, but
+    // we also need to re-visit the Store node itself.
+    AddToWorklist(N);
     return SDValue(N, 0);
+  }
 }

 // If this is a load followed by a store to the same location, then the store
@@ -14973,6 +14806,18 @@
   return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases);
 }

+// This function tries to collect a bunch of potentially interesting
+// nodes to improve the chains of, all at once. This might seem
+// redundant, as this function gets called when visiting every store
+// node, so why not let the work be done on each store as it's visited?
+//
+// I believe this is mainly important because MergeConsecutiveStores
+// is unable to deal with merging stores of different sizes, so unless
+// we improve the chains of all the potential candidates up-front
+// before running MergeConsecutiveStores, it might only see some of
+// the nodes that will eventually be candidates, and then not be able
+// to go from a partially-merged state to the desired final
+// fully-merged state.
 bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
   // This holds the base pointer, index, and the offset in bytes from the base
   // pointer.
@@ -15008,10 +14853,8 @@
     if (!Ptr.equalBaseIndex(BasePtr))
       break;

-    // Find the next memory operand in the chain. If the next operand in the
-    // chain is a store then move up and continue the scan with the next
-    // memory operand. If the next operand is a load save it and use alias
-    // information to check if it interferes with anything.
+    // Walk up the chain to find the next store node, ignoring any
+    // intermediate loads. Any other kind of node will halt the loop.
    SDNode *NextInChain = Index->getChain().getNode();
     while (true) {
       if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
@@ -15030,9 +14873,14 @@
         Index = nullptr;
         break;
       }
-    }
+    } // end while
   }

+  // At this point, ChainedStores lists all of the Store nodes
+  // reachable by iterating up through chain nodes matching the above
+  // conditions. For each such store identified, try to find an
+  // earlier chain to attach the store to which won't violate the
+  // required ordering.
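To make the preceding comment concrete, here is the shape of that follow-up phase in miniature (StoreNode, findBetterChain, and improveChains are invented for this sketch; the real FindBetterChain consults alias analysis rather than blindly using the entry token). Replacement chains are recorded first and applied afterwards, so rewriting one store does not disturb the walk that discovers the rest:

    #include <utility>
    #include <vector>

    struct StoreNode {
      StoreNode *Chain = nullptr;
    };

    // Toy FindBetterChain: pretend every store may hang off the entry token.
    StoreNode *findBetterChain(StoreNode *, StoreNode *EntryToken) {
      return EntryToken;
    }

    void improveChains(const std::vector<StoreNode *> &ChainedStores,
                       StoreNode *EntryToken) {
      std::vector<std::pair<StoreNode *, StoreNode *>> BetterChains;
      for (StoreNode *St : ChainedStores) // phase 1: decide
        BetterChains.emplace_back(St, findBetterChain(St, EntryToken));
      for (auto &P : BetterChains)        // phase 2: rewrite
        P.first->Chain = P.second;
    }

    int main() {
      StoreNode Entry, A{&Entry}, B{&A};  // B chained behind A
      std::vector<StoreNode *> Stores = {&A, &B};
      improveChains(Stores, &Entry);
      return (A.Chain == &Entry && B.Chain == &Entry) ? 0 : 1;
    }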
  bool MadeChangeToSt = false;
  SmallVector<std::pair<StoreSDNode *, SDValue>, 8> BetterChains;
Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5038,7 +5038,7 @@
   assert(Chain.getValueType() == MVT::Other && "Invalid chain type");

   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
-    Alignment = getEVTAlignment(VT);
+    Alignment = getEVTAlignment(MemVT);

   MMOFlags |= MachineMemOperand::MOLoad;
   assert((MMOFlags & MachineMemOperand::MOStore) == 0);
Index: lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- lib/CodeGen/TargetLoweringBase.cpp
+++ lib/CodeGen/TargetLoweringBase.cpp
@@ -822,7 +822,7 @@
   MinFunctionAlignment = 0;
   PrefFunctionAlignment = 0;
   PrefLoopAlignment = 0;
-  GatherAllAliasesMaxDepth = 6;
+  GatherAllAliasesMaxDepth = 18;
   MinStackArgumentAlignment = 1;
   MinimumJumpTableEntries = 4;
   // TODO: the default will be switched to 0 in the next commit, along
Index: test/CodeGen/AArch64/argument-blocks.ll
===================================================================
--- test/CodeGen/AArch64/argument-blocks.ll
+++ test/CodeGen/AArch64/argument-blocks.ll
@@ -62,7 +62,7 @@
 ; but should go in an 8-byte aligned slot.
 define void @test_varargs_stackalign() {
 ; CHECK-LABEL: test_varargs_stackalign:
-; CHECK-DARWINPCS: stp {{w[0-9]+}}, {{w[0-9]+}}, [sp, #16]
+; CHECK-DARWINPCS: str {{x[0-9]+}}, [sp, #16]

   call void(...) @callee([3 x float] undef, [2 x float] [float 1.0, float 2.0])
   ret void
Index: test/CodeGen/AArch64/arm64-abi-varargs.ll
===================================================================
--- test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -6,17 +6,13 @@
 define void @fn9(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp {
 ; CHECK-LABEL: fn9:
 ; 9th fixed argument
-; CHECK: ldr {{w[0-9]+}}, [sp, #64]
-; CHECK: add [[ARGS:x[0-9]+]], sp, #72
-; CHECK: add {{x[0-9]+}}, [[ARGS]], #8
+; CHECK: add x[[ADDR:[0-9]+]], sp, #72
 ; First vararg
-; CHECK: ldr {{w[0-9]+}}, [sp, #72]
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8
+; CHECK-DAG: ldr {{w[0-9]+}}, [sp, #72]
 ; Second vararg
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8
+; CHECK-DAG: ldr {{w[0-9]+}}, [x[[ADDR]]]
 ; Third vararg
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
+; CHECK-DAG: ldr {{w[0-9]+}}, [x[[ADDR]]], #8
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
Index: test/CodeGen/AArch64/arm64-abi.ll
===================================================================
--- test/CodeGen/AArch64/arm64-abi.ll
+++ test/CodeGen/AArch64/arm64-abi.ll
@@ -205,10 +205,7 @@
 define i32 @test8(i32 %argc, i8** nocapture %argv) nounwind {
 entry:
 ; CHECK-LABEL: test8
-; CHECK: strb {{w[0-9]+}}, [sp, #3]
-; CHECK: strb wzr, [sp, #2]
-; CHECK: strb {{w[0-9]+}}, [sp, #1]
-; CHECK: strb wzr, [sp]
+; CHECK: str w8, [sp]
 ; CHECK: bl
 ; FAST-LABEL: test8
 ; FAST: strb {{w[0-9]+}}, [sp]
Index: test/CodeGen/AArch64/arm64-memset-inline.ll
===================================================================
--- test/CodeGen/AArch64/arm64-memset-inline.ll
+++ test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -9,11 +9,15 @@
   ret void
 }

+; FIXME: This shouldn't need to load in a zero value to store
+; (e.g.
stp xzr,xzr [sp, #16]) + define void @t2() nounwind ssp { entry: ; CHECK-LABEL: t2: +; CHECK: movi v0.2d, #0000000000000000 +; CHECK: stur q0, [sp, #16] ; CHECK: strh wzr, [sp, #32] -; CHECK: stp xzr, xzr, [sp, #16] ; CHECK: str xzr, [sp, #8] %buf = alloca [26 x i8], align 1 %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0 Index: test/CodeGen/AArch64/arm64-stur.ll =================================================================== --- test/CodeGen/AArch64/arm64-stur.ll +++ test/CodeGen/AArch64/arm64-stur.ll @@ -47,11 +47,14 @@ ret void } +;; FIXME: Again, with the writing of a quadword zero... + define void @foo(%struct.X* nocapture %p) nounwind optsize ssp { ; CHECK-LABEL: foo: ; CHECK-NOT: str -; CHECK: stur xzr, [x0, #12] -; CHECK-NEXT: stur xzr, [x0, #4] +; CHECK: stur q0, [x0, #4] +; CHECK-FIXME: stur xzr, [x0, #12] +; CHECK-FIXME-NEXT: stur xzr, [x0, #4] ; CHECK-NEXT: ret %B = getelementptr inbounds %struct.X, %struct.X* %p, i64 0, i32 1 %val = bitcast i64* %B to i8* Index: test/CodeGen/AArch64/merge-store.ll =================================================================== --- test/CodeGen/AArch64/merge-store.ll +++ test/CodeGen/AArch64/merge-store.ll @@ -4,8 +4,9 @@ @g0 = external global <3 x float>, align 16 @g1 = external global <3 x float>, align 4 -; CHECK: ldr s[[R0:[0-9]+]], {{\[}}[[R1:x[0-9]+]]{{\]}}, #4 -; CHECK: ld1{{\.?s?}} { v[[R0]]{{\.?s?}} }[1], {{\[}}[[R1]]{{\]}} +; CHECK: ldr q[[R0:[0-9]+]], {{\[}}[[R1:x[0-9]+]], :lo12:g0 +;; TODO: this next line seems like a redundant no-op move? +; CHECK: ins v0.s[1], v0.s[1] ; CHECK: str d[[R0]] define void @blam() { Index: test/CodeGen/AArch64/vector_merge_dep_check.ll =================================================================== --- test/CodeGen/AArch64/vector_merge_dep_check.ll +++ test/CodeGen/AArch64/vector_merge_dep_check.ll @@ -1,5 +1,4 @@ -; RUN: llc --combiner-alias-analysis=false < %s | FileCheck %s -; RUN: llc --combiner-alias-analysis=true < %s | FileCheck %s +; RUN: llc < %s | FileCheck %s ; This test checks that we do not merge stores together which have ; dependencies through their non-chain operands (e.g. 
one store is the
Index: test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
===================================================================
--- test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
+++ test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
@@ -88,12 +88,9 @@
 ; SI-DAG: v_cvt_f32_ubyte2_e32
 ; SI-DAG: v_cvt_f32_ubyte3_e32

-; SI-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 24
-; SI-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 16
 ; SI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16
 ; SI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 8
 ; SI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xffff,
-; SI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xff00,
 ; SI-DAG: v_add_i32

 ; SI: buffer_store_dwordx4
Index: test/CodeGen/AMDGPU/debugger-insert-nops.ll
===================================================================
--- test/CodeGen/AMDGPU/debugger-insert-nops.ll
+++ test/CodeGen/AMDGPU/debugger-insert-nops.ll
@@ -1,13 +1,19 @@
-; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-insert-nops -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-insert-nops -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-insert-nops -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECKNOP

-; CHECK: test01.cl:2:{{[0-9]+}}
-; CHECK-NEXT: s_nop 0
+; Each source line location appears:
+; CHECK-DAG: test01.cl:2:3
+; CHECK-DAG: test01.cl:3:3
+; CHECK-DAG: test01.cl:4:3

-; CHECK: test01.cl:3:{{[0-9]+}}
-; CHECK-NEXT: s_nop 0

-; CHECK: test01.cl:4:{{[0-9]+}}
-; CHECK-NEXT: s_nop 0
+; Each source line location is followed by a nop:
+; CHECKNOP: test01.cl:{{[234]}}:3
+; CHECKNOP-NEXT: s_nop 0
+; CHECKNOP: test01.cl:{{[234]}}:3
+; CHECKNOP-NEXT: s_nop 0
+; CHECKNOP: test01.cl:{{[234]}}:3
+; CHECKNOP-NEXT: s_nop 0

 ; CHECK: test01.cl:5:{{[0-9]+}}
 ; CHECK-NEXT: s_nop 0
@@ -21,7 +27,7 @@
   call void @llvm.dbg.declare(metadata i32 addrspace(1)** %A.addr, metadata !17, metadata !18), !dbg !19
   %0 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !20
   %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i32 0, !dbg !20
-  store i32 1, i32 addrspace(1)* %arrayidx, align 4, !dbg !21
+  store i32 1, i32 addrspace(1)* %arrayidx, align 4, !dbg !20
   %1 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !22
   %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %1, i32 1, !dbg !22
   store i32 2, i32 addrspace(1)* %arrayidx1, align 4, !dbg !23
Index: test/CodeGen/AMDGPU/merge-stores.ll
===================================================================
--- test/CodeGen/AMDGPU/merge-stores.ll
+++ test/CodeGen/AMDGPU/merge-stores.ll
@@ -1,8 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-NOAA %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-NOAA %s
-
-; RUN: llc -march=amdgcn -verify-machineinstrs -combiner-alias-analysis < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -combiner-alias-analysis < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s

 ; Run with devices with different unaligned load restrictions.
@@ -148,17 +145,10 @@ ret void } -; FIXME: Should be able to merge this ; GCN-LABEL: {{^}}merge_global_store_4_constants_mixed_i32_f32: -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v - -; GCN-AA: buffer_store_dwordx2 -; GCN-AA: buffer_store_dword v -; GCN-AA: buffer_store_dword v - +; GCN: buffer_store_dwordx2 +; GCN: buffer_store_dword v +; GCN: buffer_store_dword v ; GCN: s_endpgm define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 { %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1 @@ -477,17 +467,9 @@ ret void } -; This works once AA is enabled on the subtarget ; GCN-LABEL: {{^}}merge_global_store_4_vector_elts_loads_v4i32: ; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]] - -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v - -; GCN-AA: buffer_store_dwordx4 [[LOAD]] - +; GCN: buffer_store_dwordx4 [[LOAD]] ; GCN: s_endpgm define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 { %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1 Index: test/CodeGen/AMDGPU/private-element-size.ll =================================================================== --- test/CodeGen/AMDGPU/private-element-size.ll +++ test/CodeGen/AMDGPU/private-element-size.ll @@ -32,10 +32,10 @@ ; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:24{{$}} ; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:28{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:8{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:12{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:8{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:12{{$}} define void @private_elt_size_v4i32(<4 x i32> addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() Index: test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll =================================================================== --- test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll +++ test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll @@ -156,9 +156,8 @@ ; FUNC-LABEL: @reorder_local_offsets ; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:100 offset1:102 -; CI: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset0:3 offset1:100 -; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12 -; CI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:408 +; CI-DAG: ds_write2_b32 {{v[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:3 offset1:100 +; CI-DAG: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:408 ; CI: buffer_store_dword ; CI: s_endpgm define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 { @@ -180,12 +179,12 @@ } ; FUNC-LABEL: @reorder_global_offsets -; CI: buffer_load_dword {{v[0-9]+}}, off, 
{{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
-; CI: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
-; CI: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
-; CI: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
-; CI: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
-; CI: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
+; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
+; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
+; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
+; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
+; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
+; CI: buffer_store_dword
 ; CI: s_endpgm
 define void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 {
   %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
Index: test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll
===================================================================
--- test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll
+++ test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll
@@ -3,6 +3,13 @@
 ; RUN: llc -march=amdgcn -mcpu=hawaii -mtriple=amdgcn-unknown-amdhsa -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CIHSA -check-prefix=HSA %s
 ; RUN: llc -march=amdgcn -mcpu=fiji -mtriple=amdgcn-unknown-amdhsa -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VIHSA -check-prefix=HSA %s

+;; FIXME: this fails because the load generated from extractelement is
+;; now properly recognized as forwardable to the value stored in
+;; insertelement, and thus the loads/stores drop away entirely. This
+;; makes the intended test, of running out of registers, not occur.
+
+;; XFAIL: *
+
 ; This ends up using all 256 registers and requires register
 ; scavenging which will fail to find an unused register.
Index: test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
===================================================================
--- test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
+++ test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
@@ -1,6 +1,12 @@
 ; RUN: llc -march=amdgcn -mcpu=tahiti -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
 ; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s

+;; FIXME: this fails because the load generated from extractelement is
+;; now properly recognized as forwardable to the value stored in
+;; insertelement, and thus the loads/stores drop away entirely. This
+;; makes the intended test, of running out of registers, not occur.
+;; XFAIL: *
+
 ; This ends up using all 255 registers and requires register
 ; scavenging which will fail to find an unused register.
Index: test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll =================================================================== --- test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll +++ test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll @@ -12,7 +12,8 @@ entry: ; CHECK: sub sp, sp, #12 ; CHECK: sub sp, sp, #4 -; CHECK: stmib sp, {r1, r2, r3} +; CHECK: add r0, sp, #4 +; CHECK: stm sp, {r0, r1, r2, r3} %g = alloca i8* %g1 = bitcast i8** %g to i8* call void @llvm.va_start(i8* %g1) Index: test/CodeGen/ARM/alloc-no-stack-realign.ll =================================================================== --- test/CodeGen/ARM/alloc-no-stack-realign.ll +++ test/CodeGen/ARM/alloc-no-stack-realign.ll @@ -51,12 +51,12 @@ ; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] +; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] ; REALIGN: orr r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] ; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #32 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #16 -; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] +; REALIGN: orr r[[R1:[0-9]+]], r[[R1]], #16 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] ; REALIGN: add r[[R1:[0-9]+]], r[[R0:0]], #48 Index: test/CodeGen/ARM/ifcvt10.ll =================================================================== --- test/CodeGen/ARM/ifcvt10.ll +++ test/CodeGen/ARM/ifcvt10.ll @@ -10,8 +10,6 @@ ; CHECK: vpop {d8} ; CHECK-NOT: vpopne ; CHECK: pop {r7, pc} -; CHECK: vpop {d8} -; CHECK: pop {r7, pc} br i1 undef, label %if.else, label %if.then if.then: ; preds = %entry Index: test/CodeGen/ARM/memset-inline.ll =================================================================== --- test/CodeGen/ARM/memset-inline.ll +++ test/CodeGen/ARM/memset-inline.ll @@ -3,9 +3,15 @@ define void @t1(i8* nocapture %c) nounwind optsize { entry: ; CHECK-LABEL: t1: + +;; FIXME: like with arm64-memset-inline.ll, learning how to merge +;; stores made this code worse, since it now uses a vector move, +;; instead of just using an strd instruction taking two registers. + +; CHECK: vmov.i32 d16, #0x0 +; CHECK: vst1.32 {d16}, [r0:64]! 
; CHECK: movs r1, #0 -; CHECK: strd r1, r1, [r0] -; CHECK: str r1, [r0, #8] +; CHECK: str r1, [r0] call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 12, i32 8, i1 false) ret void } Index: test/CodeGen/ARM/static-addr-hoisting.ll =================================================================== --- test/CodeGen/ARM/static-addr-hoisting.ll +++ test/CodeGen/ARM/static-addr-hoisting.ll @@ -6,9 +6,9 @@ ; CHECK: movs [[VAL:r[0-9]+]], #42 ; CHECK: movt r[[BASE1]], #15 -; CHECK: str [[VAL]], [r[[BASE1]]] -; CHECK: str [[VAL]], [r[[BASE1]], #24] -; CHECK: str.w [[VAL]], [r[[BASE1]], #42] +; CHECK-DAG: str [[VAL]], [r[[BASE1]]] +; CHECK-DAG: str [[VAL]], [r[[BASE1]], #24] +; CHECK-DAG: str.w [[VAL]], [r[[BASE1]], #42] ; CHECK: movw r[[BASE2:[0-9]+]], #20394 ; CHECK: movt r[[BASE2]], #18 Index: test/CodeGen/BPF/undef.ll =================================================================== --- test/CodeGen/BPF/undef.ll +++ test/CodeGen/BPF/undef.ll @@ -12,51 +12,51 @@ @llvm.used = appending global [6 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @_license, i32 0, i32 0), i8* bitcast (i32 (%struct.__sk_buff*)* @ebpf_filter to i8*), i8* bitcast (%struct.bpf_map_def* @routing to i8*), i8* bitcast (%struct.bpf_map_def* @routing_miss_0 to i8*), i8* bitcast (%struct.bpf_map_def* @test1 to i8*), i8* bitcast (%struct.bpf_map_def* @test1_miss_4 to i8*)], section "llvm.metadata" ; Function Attrs: nounwind uwtable +; CHECK: mov r2, r10 +; CHECK: addi r2, -2 +; CHECK: mov r1, 0 +; CHECK: sth 6(r2), r1 +; CHECK: sth 4(r2), r1 +; CHECK: sth 2(r2), r1 +; CHECK: mov r2, 6 +; CHECK: stb -7(r10), r2 +; CHECK: mov r2, 5 +; CHECK: stb -8(r10), r2 +; CHECK: mov r2, 7 +; CHECK: stb -6(r10), r2 +; CHECK: mov r2, 8 +; CHECK: stb -5(r10), r2 +; CHECK: mov r2, 9 +; CHECK: stb -4(r10), r2 +; CHECK: mov r2, 10 +; CHECK: stb -3(r10), r2 +; CHECK: sth 24(r10), r1 +; CHECK: sth 22(r10), r1 +; CHECK: sth 20(r10), r1 +; CHECK: sth 18(r10), r1 +; CHECK: sth 16(r10), r1 +; CHECK: sth 14(r10), r1 +; CHECK: sth 12(r10), r1 +; CHECK: sth 10(r10), r1 +; CHECK: sth 8(r10), r1 +; CHECK: sth 6(r10), r1 +; CHECK: sth -2(r10), r1 +; CHECK: sth 26(r10), r1 define i32 @ebpf_filter(%struct.__sk_buff* nocapture readnone %ebpf_packet) #0 section "socket1" { %key = alloca %struct.routing_key_2, align 1 %1 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 0 -; CHECK: mov r1, 5 -; CHECK: stb -8(r10), r1 store i8 5, i8* %1, align 1 %2 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 1 -; CHECK: mov r1, 6 -; CHECK: stb -7(r10), r1 store i8 6, i8* %2, align 1 %3 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 2 -; CHECK: mov r1, 7 -; CHECK: stb -6(r10), r1 store i8 7, i8* %3, align 1 %4 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 3 -; CHECK: mov r1, 8 -; CHECK: stb -5(r10), r1 store i8 8, i8* %4, align 1 %5 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 4 -; CHECK: mov r1, 9 -; CHECK: stb -4(r10), r1 store i8 9, i8* %5, align 1 %6 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 5 -; CHECK: mov r1, 10 -; CHECK: stb -3(r10), r1 store i8 10, i8* %6, align 1 %7 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 1, i32 0, i64 0 -; CHECK: mov r1, r10 -; CHECK: addi r1, -2 -; CHECK: mov r2, 0 -; CHECK: sth 6(r1), r2 -; CHECK: sth 4(r1), r2 -; 
CHECK: sth 2(r1), r2 -; CHECK: sth 24(r10), r2 -; CHECK: sth 22(r10), r2 -; CHECK: sth 20(r10), r2 -; CHECK: sth 18(r10), r2 -; CHECK: sth 16(r10), r2 -; CHECK: sth 14(r10), r2 -; CHECK: sth 12(r10), r2 -; CHECK: sth 10(r10), r2 -; CHECK: sth 8(r10), r2 -; CHECK: sth 6(r10), r2 -; CHECK: sth -2(r10), r2 -; CHECK: sth 26(r10), r2 call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 30, i32 1, i1 false) %8 = call i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...) bitcast (i32 (...)* @bpf_map_lookup_elem to i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...)*)(%struct.bpf_map_def* nonnull @routing, %struct.routing_key_2* nonnull %key) #3 ret i32 undef Index: test/CodeGen/MSP430/Inst16mm.ll =================================================================== --- test/CodeGen/MSP430/Inst16mm.ll +++ test/CodeGen/MSP430/Inst16mm.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=msp430 -combiner-alias-analysis < %s | FileCheck %s +; RUN: llc -march=msp430 < %s | FileCheck %s target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8" target triple = "msp430-generic-generic" @foo = common global i16 0, align 2 Index: test/CodeGen/Mips/biggot.ll =================================================================== --- test/CodeGen/Mips/biggot.ll +++ test/CodeGen/Mips/biggot.ll @@ -7,12 +7,14 @@ define void @foo1() nounwind { entry: ; O32-LABEL: foo1: -; O32: lui $[[R0:[0-9]+]], %got_hi(v0) -; O32: addu $[[R1:[0-9]+]], $[[R0]], ${{[a-z0-9]+}} -; O32: lw ${{[0-9]+}}, %got_lo(v0)($[[R1]]) ; O32: lui $[[R2:[0-9]+]], %call_hi(foo0) ; O32: addu $[[R3:[0-9]+]], $[[R2]], ${{[a-z0-9]+}} ; O32: lw ${{[0-9]+}}, %call_lo(foo0)($[[R3]]) +; O32: lui $[[R0:[0-9]+]], %got_hi(v0) +; O32: addu $[[R1:[0-9]+]], $[[R0]], ${{[a-z0-9]+}} +; O32: lw ${{[0-9]+}}, %got_lo(v0)($[[R1]]) + + ; N64-LABEL: foo1: ; N64-DAG: lui $[[R0:[0-9]+]], %got_hi(v0) Index: test/CodeGen/Mips/cconv/arguments-float.ll =================================================================== --- test/CodeGen/Mips/cconv/arguments-float.ll +++ test/CodeGen/Mips/cconv/arguments-float.ll @@ -63,39 +63,39 @@ ; NEW-DAG: sd $5, 16([[R2]]) ; O32 has run out of argument registers and starts using the stack -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 16($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 20($sp) ; O32-DAG: sw [[R3]], 24([[R2]]) ; O32-DAG: sw [[R4]], 28([[R2]]) ; NEW-DAG: sd $6, 24([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp) ; O32-DAG: sw [[R3]], 32([[R2]]) ; O32-DAG: sw [[R4]], 36([[R2]]) ; NEW-DAG: sd $7, 32([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp) ; O32-DAG: sw [[R3]], 40([[R2]]) ; O32-DAG: sw [[R4]], 44([[R2]]) ; NEW-DAG: sd $8, 40([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp) ; O32-DAG: sw [[R3]], 48([[R2]]) ; O32-DAG: sw [[R4]], 52([[R2]]) ; NEW-DAG: sd $9, 48([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 56($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 60($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp) ; O32-DAG: sw [[R3]], 56([[R2]]) ; O32-DAG: sw [[R4]], 60([[R2]]) ; NEW-DAG: sd $10, 56([[R2]]) ; N32/N64 have 
run out of registers and starts using the stack too -; O32-DAG: lw [[R3:\$[0-9]+]], 64($sp) -; O32-DAG: lw [[R4:\$[0-9]+]], 68($sp) +; O32-DAG: lw [[R3:\$[0-9]+]], 56($sp) +; O32-DAG: lw [[R4:\$[0-9]+]], 60($sp) ; O32-DAG: sw [[R3]], 64([[R2]]) ; O32-DAG: sw [[R4]], 68([[R2]]) ; NEW-DAG: ld [[R3:\$[0-9]+]], 0($sp) Index: test/CodeGen/Mips/cconv/arguments-varargs.ll =================================================================== --- test/CodeGen/Mips/cconv/arguments-varargs.ll +++ test/CodeGen/Mips/cconv/arguments-varargs.ll @@ -315,12 +315,11 @@ ; Big-endian mode for N32/N64 must add an additional 4 to the offset due to byte ; order. ; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 8([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 12([[GV]]) ; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) @@ -349,10 +348,9 @@ ; Load the second argument from the variable portion and copy it to the global. ; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG2]], 16([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG2]], 20([[GV]]) ; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]]) @@ -678,12 +676,11 @@ ; Big-endian mode for N32/N64 must add an additional 4 to the offset due to byte ; order. ; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 8([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 12([[GV]]) ; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) @@ -712,10 +709,9 @@ ; Load the second argument from the variable portion and copy it to the global. 
; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG2]], 16([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 ; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) +; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG2]], 20([[GV]]) ; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]]) @@ -1040,10 +1036,9 @@ ; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) ; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG1]], 8([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 12([[GV]]) ; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) @@ -1072,10 +1067,9 @@ ; Load the second argument from the variable portion and copy it to the global. ; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG2]], 16([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG2]], 20([[GV]]) ; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]]) Index: test/CodeGen/Mips/fastcc.ll =================================================================== --- test/CodeGen/Mips/fastcc.ll +++ test/CodeGen/Mips/fastcc.ll @@ -132,20 +132,19 @@ define internal fastcc void @callee0(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15, i32 %a16) nounwind noinline { entry: ; CHECK: callee0 -; CHECK: sw $4 -; CHECK: sw $5 -; CHECK: sw $6 -; CHECK: sw $7 -; CHECK: sw $8 -; CHECK: sw $9 -; CHECK: sw $10 -; CHECK: sw $11 -; CHECK: sw $12 -; CHECK: sw $13 -; CHECK: sw $14 -; CHECK: sw $15 -; CHECK: sw $24 -; CHECK: sw $3 +; CHECK-DAG: sw $4 +; CHECK-DAG: sw $5 +; CHECK-DAG: sw $7 +; CHECK-DAG: sw $8 +; CHECK-DAG: sw $9 +; CHECK-DAG: sw $10 +; CHECK-DAG: sw $11 +; CHECK-DAG: sw $12 +; CHECK-DAG: sw $13 +; CHECK-DAG: sw $14 +; CHECK-DAG: sw $15 +; CHECK-DAG: sw $24 +; CHECK-DAG: sw $3 ; t6, t7 and t8 are reserved in NaCl and cannot be used for fastcc. 
; CHECK-NACL-NOT: sw $14 @@ -223,27 +222,27 @@ define internal fastcc void @callee1(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7, float %a8, float %a9, float %a10, float %a11, float %a12, float %a13, float %a14, float %a15, float %a16, float %a17, float %a18, float %a19, float %a20) nounwind noinline { entry: -; CHECK: callee1 -; CHECK: swc1 $f0 -; CHECK: swc1 $f1 -; CHECK: swc1 $f2 -; CHECK: swc1 $f3 -; CHECK: swc1 $f4 -; CHECK: swc1 $f5 -; CHECK: swc1 $f6 -; CHECK: swc1 $f7 -; CHECK: swc1 $f8 -; CHECK: swc1 $f9 -; CHECK: swc1 $f10 -; CHECK: swc1 $f11 -; CHECK: swc1 $f12 -; CHECK: swc1 $f13 -; CHECK: swc1 $f14 -; CHECK: swc1 $f15 -; CHECK: swc1 $f16 -; CHECK: swc1 $f17 -; CHECK: swc1 $f18 -; CHECK: swc1 $f19 +; CHECK-LABEL: callee1: +; CHECK-DAG: swc1 $f0 +; CHECK-DAG: swc1 $f1 +; CHECK-DAG: swc1 $f2 +; CHECK-DAG: swc1 $f3 +; CHECK-DAG: swc1 $f4 +; CHECK-DAG: swc1 $f5 +; CHECK-DAG: swc1 $f6 +; CHECK-DAG: swc1 $f7 +; CHECK-DAG: swc1 $f8 +; CHECK-DAG: swc1 $f9 +; CHECK-DAG: swc1 $f10 +; CHECK-DAG: swc1 $f11 +; CHECK-DAG: swc1 $f12 +; CHECK-DAG: swc1 $f13 +; CHECK-DAG: swc1 $f14 +; CHECK-DAG: swc1 $f15 +; CHECK-DAG: swc1 $f16 +; CHECK-DAG: swc1 $f17 +; CHECK-DAG: swc1 $f18 +; CHECK-DAG: swc1 $f19 store float %a0, float* @gf0, align 4 store float %a1, float* @gf1, align 4 @@ -290,7 +289,6 @@ ; NOODDSPREG-DAG: lwc1 $f18, 36($[[R0]]) ; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 40($[[R0]]) -; NOODDSPREG-DAG: swc1 $[[F0]], 8($sp) %0 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4 %1 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 1), align 4 @@ -316,8 +314,6 @@ ; NOODDSPREG-LABEL: callee2: -; NOODDSPREG: addiu $sp, $sp, -[[OFFSET:[0-9]+]] - ; Check that first 10 arguments are received in even float registers ; f0, f2, ... , f18. Check that 11th argument is received on stack. @@ -333,7 +329,7 @@ ; NOODDSPREG-DAG: swc1 $f16, 32($[[R0]]) ; NOODDSPREG-DAG: swc1 $f18, 36($[[R0]]) -; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp) +; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 0($sp) ; NOODDSPREG-DAG: swc1 $[[F0]], 40($[[R0]]) store float %a0, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4 @@ -397,7 +393,6 @@ ; FP64-NOODDSPREG-LABEL: callee3: -; FP64-NOODDSPREG: addiu $sp, $sp, -[[OFFSET:[0-9]+]] ; Check that first 10 arguments are received in even float registers ; f0, f2, ... , f18. Check that 11th argument is received on stack. 
@@ -414,7 +409,7 @@ ; FP64-NOODDSPREG-DAG: sdc1 $f16, 64($[[R0]]) ; FP64-NOODDSPREG-DAG: sdc1 $f18, 72($[[R0]]) -; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp) +; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], 0($sp) ; FP64-NOODDSPREG-DAG: sdc1 $[[F0]], 80($[[R0]]) store double %a0, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8 Index: test/CodeGen/Mips/load-store-left-right.ll =================================================================== --- test/CodeGen/Mips/load-store-left-right.ll +++ test/CodeGen/Mips/load-store-left-right.ll @@ -250,12 +250,18 @@ ; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)( ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)( -; FIXME: We should be able to do better than this on MIPS32r6/MIPS64r6 since -; we have unaligned halfword load/store available -; ALL-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) -; ALL-DAG: sb $[[R1]], 2($[[PTR]]) -; ALL-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) -; ALL-DAG: sb $[[R1]], 3($[[PTR]]) +; MIPS32-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32-DAG: sb $[[R1]], 2($[[PTR]]) +; MIPS32-DAG: lbu $[[R2:[0-9]+]], 1($[[PTR]]) +; MIPS32-DAG: sb $[[R2]], 3($[[PTR]]) + +; MIPS32R6: lhu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32R6: sh $[[R1]], 2($[[PTR]]) + +; MIPS64-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-DAG: sb $[[R1]], 2($[[PTR]]) +; MIPS64-DAG: lbu $[[R2:[0-9]+]], 1($[[PTR]]) +; MIPS64-DAG: sb $[[R2]], 3($[[PTR]]) %0 = load %struct.S0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 0), align 1 store %struct.S0 %0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 1), align 1 @@ -268,37 +274,54 @@ ; MIPS32-EL: lw $[[PTR:[0-9]+]], %got(struct_s1)( ; MIPS32-EB: lw $[[PTR:[0-9]+]], %got(struct_s1)( -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 4($[[PTR]]) -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 5($[[PTR]]) -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 6($[[PTR]]) -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 7($[[PTR]]) +; MIPS32-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS32-EL-DAG: lwr $[[R1]], 0($[[PTR]]) +; MIPS32-EL-DAG: swl $[[R1]], 7($[[PTR]]) +; MIPS32-EL-DAG: swr $[[R1]], 4($[[PTR]]) +; MIPS32-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32-EB-DAG: lwr $[[R1]], 3($[[PTR]]) +; MIPS32-EB-DAG: swl $[[R1]], 4($[[PTR]]) +; MIPS32-EB-DAG: swr $[[R1]], 7($[[PTR]]) + +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 4($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 5($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 6($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 7($[[PTR]]) ; MIPS32R6: lw $[[PTR:[0-9]+]], %got(struct_s1)( -; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS32R6-DAG: sh $[[R1]], 4($[[PTR]]) -; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS32R6-DAG: sh $[[R1]], 6($[[PTR]]) +; MIPS32R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32R6-DAG: sw $[[R1]], 4($[[PTR]]) ; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)( ; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)( -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 4($[[PTR]]) -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 5($[[PTR]]) -; MIPS64-DAG: lbu 
$[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 6($[[PTR]]) -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 7($[[PTR]]) + +; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]]) +; MIPS64-EL-DAG: swl $[[R1]], 7($[[PTR]]) +; MIPS64-EL-DAG: swr $[[R1]], 4($[[PTR]]) + +; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]]) +; MIPS64-EB-DAG: swl $[[R1]], 4($[[PTR]]) +; MIPS64-EB-DAG: swr $[[R1]], 7($[[PTR]]) + + +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 4($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 5($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 6($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 7($[[PTR]]) ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)( -; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64R6-DAG: sh $[[R1]], 4($[[PTR]]) -; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS64R6-DAG: sh $[[R1]], 6($[[PTR]]) +; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64R6-DAG: sw $[[R1]], 4($[[PTR]]) %0 = load %struct.S1, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 0), align 1 store %struct.S1 %0, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 1), align 1 @@ -336,30 +359,21 @@ ; MIPS32R6-DAG: sw $[[R1]], 12($[[PTR]]) ; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)( -; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) -; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]]) -; MIPS64-EL-DAG: swl $[[R1]], 11($[[PTR]]) -; MIPS64-EL-DAG: swr $[[R1]], 8($[[PTR]]) -; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 7($[[PTR]]) -; MIPS64-EL-DAG: lwr $[[R1]], 4($[[PTR]]) -; MIPS64-EL-DAG: swl $[[R1]], 15($[[PTR]]) -; MIPS64-EL-DAG: swr $[[R1]], 12($[[PTR]]) + +; MIPS64-EL-DAG: ldl $[[R1:[0-9]+]], 7($[[PTR]]) +; MIPS64-EL-DAG: ldr $[[R1]], 0($[[PTR]]) +; MIPS64-EL-DAG: sdl $[[R1]], 15($[[PTR]]) +; MIPS64-EL-DAG: sdr $[[R1]], 8($[[PTR]]) ; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)( -; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]]) -; MIPS64-EB-DAG: swl $[[R1]], 8($[[PTR]]) -; MIPS64-EB-DAG: swr $[[R1]], 11($[[PTR]]) -; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 4($[[PTR]]) -; MIPS64-EB-DAG: lwr $[[R1]], 7($[[PTR]]) -; MIPS64-EB-DAG: swl $[[R1]], 12($[[PTR]]) -; MIPS64-EB-DAG: swr $[[R1]], 15($[[PTR]]) +; MIPS64-EB-DAG: ldl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-EB-DAG: ldr $[[R1]], 7($[[PTR]]) +; MIPS64-EB-DAG: sdl $[[R1]], 8($[[PTR]]) +; MIPS64-EB-DAG: sdr $[[R1]], 15($[[PTR]]) ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)( -; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64R6-DAG: sw $[[R1]], 8($[[PTR]]) -; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 4($[[PTR]]) -; MIPS64R6-DAG: sw $[[R1]], 12($[[PTR]]) +; MIPS64R6-DAG: ld $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64R6-DAG: sd $[[R1]], 8($[[PTR]]) %0 = load %struct.S2, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 0), align 1 store %struct.S2 %0, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 1), align 1 @@ -416,17 +430,17 @@ ; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) ; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]]) -; MIPS64-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)( -; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64-EB-DAG: lwr $[[R1]], 
3($[[PTR]]) -; MIPS64-EB-DAG: dsll $[[R1]], $[[R1]], 32 +; MIPS64-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)( ; MIPS64-EB-DAG: lbu $[[R2:[0-9]+]], 5($[[PTR]]) ; MIPS64-EB-DAG: lbu $[[R3:[0-9]+]], 4($[[PTR]]) ; MIPS64-EB-DAG: dsll $[[T0:[0-9]+]], $[[R3]], 8 ; MIPS64-EB-DAG: or $[[T1:[0-9]+]], $[[T0]], $[[R2]] -; MIPS64-EB-DAG: dsll $[[T1]], $[[T1]], 16 -; MIPS64-EB-DAG: or $[[T3:[0-9]+]], $[[R1]], $[[T1]] ; MIPS64-EB-DAG: lbu $[[R4:[0-9]+]], 6($[[PTR]]) +; MIPS64-EB-DAG: dsll $[[T1]], $[[T1]], 16 +; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]]) +; MIPS64-EB-DAG: dsll $[[R5:[0-9]+]], $[[R1]], 32 +; MIPS64-EB-DAG: or $[[T3:[0-9]+]], $[[R5]], $[[T1]] ; MIPS64-EB-DAG: dsll $[[T4:[0-9]+]], $[[R4]], 8 ; MIPS64-EB-DAG: or $4, $[[T3]], $[[T4]] Index: test/CodeGen/Mips/micromips-li.ll =================================================================== --- test/CodeGen/Mips/micromips-li.ll +++ test/CodeGen/Mips/micromips-li.ll @@ -13,6 +13,6 @@ ret i32 0 } -; CHECK: li16 ${{[2-7]|16|17}}, 1 ; CHECK: addiu ${{[0-9]+}}, $zero, 2148 +; CHECK: li16 ${{[2-7]|16|17}}, 1 ; CHECK: ori ${{[0-9]+}}, $zero, 33332 Index: test/CodeGen/Mips/mips64-f128.ll =================================================================== --- test/CodeGen/Mips/mips64-f128.ll +++ test/CodeGen/Mips/mips64-f128.ll @@ -573,10 +573,10 @@ ; ALL-LABEL: store_LD_LD: ; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1) -; ALL: ld $[[R1:[0-9]+]], 0($[[R0]]) ; ALL: ld $[[R2:[0-9]+]], 8($[[R0]]) ; ALL: ld $[[R3:[0-9]+]], %got_disp(gld0) ; ALL: sd $[[R2]], 8($[[R3]]) +; ALL: ld $[[R1:[0-9]+]], 0($[[R0]]) ; ALL: sd $[[R1]], 0($[[R3]]) define void @store_LD_LD() { Index: test/CodeGen/Mips/mno-ldc1-sdc1.ll =================================================================== --- test/CodeGen/Mips/mno-ldc1-sdc1.ll +++ test/CodeGen/Mips/mno-ldc1-sdc1.ll @@ -130,12 +130,12 @@ ; MM-MNO-PIC: addiu $[[R1:[0-9]+]], $[[R0]], %lo(_gp_disp) ; MM-MNO-PIC: addu $[[R2:[0-9]+]], $[[R1]], $25 ; MM-MNO-PIC: lw $[[R3:[0-9]+]], %got(g0)($[[R2]]) -; MM-MNO-PIC: lw16 $[[R4:[0-9]+]], 0($[[R3]]) -; MM-MNO-PIC: lw16 $[[R5:[0-9]+]], 4($[[R3]]) -; MM-MNO-LE-PIC: mtc1 $[[R4]], $f0 -; MM-MNO-LE-PIC: mthc1 $[[R5]], $f0 -; MM-MNO-BE-PIC: mtc1 $[[R5]], $f0 -; MM-MNO-BE-PIC: mthc1 $[[R4]], $f0 +; MM-MNO-PIC-DAG: lw16 $[[R4:[0-9]+]], 0($[[R3]]) +; MM-MNO-PIC-DAG: lw16 $[[R5:[0-9]+]], 4($[[R3]]) +; MM-MNO-LE-PIC-DAG: mtc1 $[[R4]], $f0 +; MM-MNO-LE-PIC-DAG: mthc1 $[[R5]], $f0 +; MM-MNO-BE-PIC-DAG: mtc1 $[[R5]], $f0 +; MM-MNO-BE-PIC-DAG: mthc1 $[[R4]], $f0 ; MM-STATIC-PIC: lui $[[R0:[0-9]+]], %hi(g0) ; MM-STATIC-PIC: ldc1 $f0, %lo(g0)($[[R0]]) @@ -214,13 +214,13 @@ ; MM-MNO-PIC: lui $[[R0:[0-9]+]], %hi(_gp_disp) ; MM-MNO-PIC: addiu $[[R1:[0-9]+]], $[[R0]], %lo(_gp_disp) ; MM-MNO-PIC: addu $[[R2:[0-9]+]], $[[R1]], $25 -; MM-MNO-LE-PIC: mfc1 $[[R3:[0-9]+]], $f12 -; MM-MNO-BE-PIC: mfhc1 $[[R3:[0-9]+]], $f12 -; MM-MNO-PIC: lw $[[R4:[0-9]+]], %got(g0)($[[R2]]) -; MM-MNO-PIC: sw16 $[[R3]], 0($[[R4]]) -; MM-MNO-LE-PIC: mfhc1 $[[R5:[0-9]+]], $f12 -; MM-MNO-BE-PIC: mfc1 $[[R5:[0-9]+]], $f12 -; MM-MNO-PIC: sw16 $[[R5]], 4($[[R4]]) +; MM-MNO-LE-PIC-DAG: mfc1 $[[R3:[0-9]+]], $f12 +; MM-MNO-BE-PIC-DAG: mfhc1 $[[R3:[0-9]+]], $f12 +; MM-MNO-PIC-DAG: lw $[[R4:[0-9]+]], %got(g0)($[[R2]]) +; MM-MNO-PIC-DAG: sw16 $[[R3]], 0($[[R4]]) +; MM-MNO-LE-PIC-DAG: mfhc1 $[[R5:[0-9]+]], $f12 +; MM-MNO-BE-PIC-DAG: mfc1 $[[R5:[0-9]+]], $f12 +; MM-MNO-PIC-DAG: sw16 $[[R5]], 4($[[R4]]) ; MM-STATIC-PIC: lui $[[R0:[0-9]+]], %hi(g0) ; MM-STATIC-PIC: sdc1 $f12, %lo(g0)($[[R0]]) @@ -267,8 
+267,8 @@ ; MM-MNO-PIC: sll16 $[[R0:[0-9]+]], $5, 3 ; MM-MNO-PIC: addu16 $[[R1:[0-9]+]], $4, $[[R0]] -; MM-MNO-PIC: lw16 $[[R2:[0-9]+]], 0($[[R1]]) -; MM-MNO-PIC: lw16 $[[R3:[0-9]+]], 4($[[R1]]) +; MM-MNO-PIC-DAG: lw16 $[[R2:[0-9]+]], 0($[[R1]]) +; MM-MNO-PIC-DAG: lw16 $[[R3:[0-9]+]], 4($[[R1]]) ; MM-MNO-LE-PIC: mtc1 $[[R2]], $f0 ; MM-MNO-LE-PIC: mthc1 $[[R3]], $f0 ; MM-MNO-BE-PIC: mtc1 $[[R3]], $f0 @@ -313,14 +313,14 @@ ; MM: addu16 $[[R1:[0-9]+]], $6, $[[R0]] ; MM: sdc1 $f12, 0($[[R1]]) -; MM-MNO-PIC: sll16 $[[R0:[0-9]+]], $7, 3 -; MM-MNO-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]] -; MM-MNO-LE-PIC: mfc1 $[[R2:[0-9]+]], $f12 -; MM-MNO-BE-PIC: mfhc1 $[[R2:[0-9]+]], $f12 -; MM-MNO-PIC: sw16 $[[R2]], 0($[[R1]]) -; MM-MNO-LE-PIC: mfhc1 $[[R3:[0-9]+]], $f12 -; MM-MNO-BE-PIC: mfc1 $[[R3:[0-9]+]], $f12 -; MM-MNO-PIC: sw16 $[[R3]], 4($[[R1]]) +; MM-MNO-PIC: sll16 $[[R0:[0-9]+]], $7, 3 +; MM-MNO-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]] +; MM-MNO-LE-PIC-DAG: mfc1 $[[R2:[0-9]+]], $f12 +; MM-MNO-BE-PIC-DAG: mfhc1 $[[R2:[0-9]+]], $f12 +; MM-MNO-PIC-DAG: sw16 $[[R2]], 0($[[R1]]) +; MM-MNO-LE-PIC-DAG: mfhc1 $[[R3:[0-9]+]], $f12 +; MM-MNO-BE-PIC-DAG: mfc1 $[[R3:[0-9]+]], $f12 +; MM-MNO-PIC-DAG: sw16 $[[R3]], 4($[[R1]]) ; MM-STATIC-PIC: sll16 $[[R0:[0-9]+]], $7, 3 ; MM-STATIC-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]] Index: test/CodeGen/Mips/msa/i5_ld_st.ll =================================================================== --- test/CodeGen/Mips/msa/i5_ld_st.ll +++ test/CodeGen/Mips/msa/i5_ld_st.ll @@ -336,8 +336,8 @@ ; CHECK: llvm_mips_st_b_valid_range_tests: ; CHECK: ld.b -; CHECK: st.b [[R1:\$w[0-9]+]], -512( -; CHECK: st.b [[R1:\$w[0-9]+]], 511( +; CHECK-DAG: st.b [[R1:\$w[0-9]+]], -512( +; CHECK-DAG: st.b [[R1:\$w[0-9]+]], 511( ; CHECK: .size llvm_mips_st_b_valid_range_tests ; @@ -351,10 +351,10 @@ } ; CHECK: llvm_mips_st_b_invalid_range_tests: -; CHECK: addiu $2, $1, -513 +; CHECK: addiu $2, $1, 512 ; CHECK: ld.b ; CHECK: st.b [[R1:\$w[0-9]+]], 0( -; CHECK: addiu $1, $1, 512 +; CHECK: addiu $1, $1, -513 ; CHECK: st.b [[R1:\$w[0-9]+]], 0( ; CHECK: .size llvm_mips_st_b_invalid_range_tests ; @@ -404,8 +404,8 @@ ; CHECK: llvm_mips_st_h_valid_range_tests: ; CHECK: ld.h -; CHECK: st.h [[R1:\$w[0-9]+]], -1024( -; CHECK: st.h [[R1:\$w[0-9]+]], 1022( +; CHECK-DAG: st.h [[R1:\$w[0-9]+]], -1024( +; CHECK-DAG: st.h [[R1:\$w[0-9]+]], 1022( ; CHECK: .size llvm_mips_st_h_valid_range_tests ; @@ -419,10 +419,10 @@ } ; CHECK: llvm_mips_st_h_invalid_range_tests: -; CHECK: addiu $2, $1, -1026 +; CHECK: addiu $2, $1, 1024 ; CHECK: ld.h ; CHECK: st.h [[R1:\$w[0-9]+]], 0( -; CHECK: addiu $1, $1, 1024 +; CHECK: addiu $1, $1, -1026 ; CHECK: st.h [[R1:\$w[0-9]+]], 0( ; CHECK: .size llvm_mips_st_h_invalid_range_tests ; @@ -472,8 +472,8 @@ ; CHECK: llvm_mips_st_w_valid_range_tests: ; CHECK: ld.w -; CHECK: st.w [[R1:\$w[0-9]+]], -2048( -; CHECK: st.w [[R1:\$w[0-9]+]], 2044( +; CHECK-DAG: st.w [[R1:\$w[0-9]+]], -2048( +; CHECK-DAG: st.w [[R1:\$w[0-9]+]], 2044( ; CHECK: .size llvm_mips_st_w_valid_range_tests ; @@ -487,10 +487,10 @@ } ; CHECK: llvm_mips_st_w_invalid_range_tests: -; CHECK: addiu $2, $1, -2052 +; CHECK: addiu $2, $1, 2048 ; CHECK: ld.w ; CHECK: st.w [[R1:\$w[0-9]+]], 0( -; CHECK: addiu $1, $1, 2048 +; CHECK: addiu $1, $1, -2052 ; CHECK: st.w [[R1:\$w[0-9]+]], 0( ; CHECK: .size llvm_mips_st_w_invalid_range_tests ; @@ -540,8 +540,8 @@ ; CHECK: llvm_mips_st_d_valid_range_tests: ; CHECK: ld.d -; CHECK: st.d [[R1:\$w[0-9]+]], -4096( -; CHECK: st.d [[R1:\$w[0-9]+]], 4088( +; CHECK-DAG: st.d [[R1:\$w[0-9]+]], -4096( +; 
CHECK-DAG: st.d [[R1:\$w[0-9]+]], 4088( ; CHECK: .size llvm_mips_st_d_valid_range_tests ; @@ -555,10 +555,10 @@ } ; CHECK: llvm_mips_st_d_invalid_range_tests: -; CHECK: addiu $2, $1, -4104 +; CHECK: addiu $2, $1, 4096 ; CHECK: ld.d ; CHECK: st.d [[R1:\$w[0-9]+]], 0( -; CHECK: addiu $1, $1, 4096 +; CHECK: addiu $1, $1, -4104 ; CHECK: st.d [[R1:\$w[0-9]+]], 0( ; CHECK: .size llvm_mips_st_d_invalid_range_tests ; Index: test/CodeGen/Mips/o32_cc_byval.ll =================================================================== --- test/CodeGen/Mips/o32_cc_byval.ll +++ test/CodeGen/Mips/o32_cc_byval.ll @@ -45,20 +45,18 @@ define void @f2(float %f, %struct.S1* nocapture byval %s1) nounwind { entry: ; CHECK: addiu $sp, $sp, -48 -; CHECK: sw $7, 60($sp) -; CHECK: sw $6, 56($sp) -; CHECK: lw $4, 80($sp) -; CHECK: ldc1 $f[[F0:[0-9]+]], 72($sp) -; CHECK: lw $[[R3:[0-9]+]], 64($sp) -; CHECK: lw $[[R4:[0-9]+]], 68($sp) -; CHECK: lw $[[R2:[0-9]+]], 60($sp) -; CHECK: lh $[[R1:[0-9]+]], 58($sp) -; CHECK: lb $[[R0:[0-9]+]], 56($sp) -; CHECK: sw $[[R0]], 32($sp) -; CHECK: sw $[[R1]], 28($sp) -; CHECK: sw $[[R2]], 24($sp) -; CHECK: sw $[[R4]], 20($sp) -; CHECK: sw $[[R3]], 16($sp) +; CHECK-DAG: sw $7, 60($sp) +; CHECK-DAG: sw $6, 56($sp) +; CHECK-DAG: ldc1 $f[[F0:[0-9]+]], 72($sp) +; CHECK-DAG: lw $[[R3:[0-9]+]], 64($sp) +; CHECK-DAG: lw $[[R4:[0-9]+]], 68($sp) +; CHECK-DAG: lh $[[R1:[0-9]+]], 58($sp) +; CHECK-DAG: lb $[[R0:[0-9]+]], 56($sp) +; CHECK-DAG: sw $[[R0]], 32($sp) +; CHECK-DAG: sw $[[R1]], 28($sp) +; CHECK-DAG: sw $[[R4]], 20($sp) +; CHECK-DAG: sw $[[R3]], 16($sp) +; CHECK-DAG: sw $7, 24($sp) ; CHECK: mfc1 $6, $f[[F0]] %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5 @@ -86,9 +84,7 @@ ; CHECK: sw $6, 56($sp) ; CHECK: sw $5, 52($sp) ; CHECK: sw $4, 48($sp) -; CHECK: lw $4, 48($sp) -; CHECK: lw $[[R0:[0-9]+]], 60($sp) -; CHECK: sw $[[R0]], 24($sp) +; CHECK: sw $7, 24($sp) %arrayidx = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 0 %tmp = load i32, i32* %arrayidx, align 4 @@ -101,14 +97,14 @@ define void @f4(float %f, %struct.S3* nocapture byval %s3, %struct.S1* nocapture byval %s1) nounwind { entry: ; CHECK: addiu $sp, $sp, -48 -; CHECK: sw $7, 60($sp) -; CHECK: sw $6, 56($sp) -; CHECK: sw $5, 52($sp) -; CHECK: lw $4, 60($sp) -; CHECK: lw $[[R1:[0-9]+]], 80($sp) -; CHECK: lb $[[R0:[0-9]+]], 52($sp) -; CHECK: sw $[[R0]], 32($sp) -; CHECK: sw $[[R1]], 24($sp) +; CHECK-DAG: sw $7, 60($sp) +; CHECK-DAG: sw $6, 56($sp) +; CHECK-DAG: sw $5, 52($sp) +; CHECK-DAG: lw $[[R1:[0-9]+]], 80($sp) +; CHECK-DAG: lb $[[R0:[0-9]+]], 52($sp) +; CHECK-DAG: sw $[[R0]], 32($sp) +; CHECK-DAG: sw $[[R1]], 24($sp) +; CHECK: move $4, $7 %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2 %tmp = load i32, i32* %i, align 4 Index: test/CodeGen/Mips/o32_cc_vararg.ll =================================================================== --- test/CodeGen/Mips/o32_cc_vararg.ll +++ test/CodeGen/Mips/o32_cc_vararg.ll @@ -29,9 +29,9 @@ ; CHECK-LABEL: va1: ; CHECK: addiu $sp, $sp, -16 +; CHECK: sw $5, 20($sp) ; CHECK: sw $7, 28($sp) ; CHECK: sw $6, 24($sp) -; CHECK: sw $5, 20($sp) ; CHECK: lw $2, 20($sp) } @@ -83,8 +83,8 @@ ; CHECK-LABEL: va3: ; CHECK: addiu $sp, $sp, -16 -; CHECK: sw $7, 28($sp) ; CHECK: sw $6, 24($sp) +; CHECK: sw $7, 28($sp) ; CHECK: lw $2, 24($sp) } Index: test/CodeGen/PowerPC/anon_aggr.ll =================================================================== --- test/CodeGen/PowerPC/anon_aggr.ll +++ test/CodeGen/PowerPC/anon_aggr.ll @@ -60,10 +60,9 @@ unequal: ret 
i8* %array2_ptr
}
-
; CHECK-LABEL: func2:
-; CHECK: ld [[REG2:[0-9]+]], 72(1)
-; CHECK: cmpld {{([0-9]+,)?}}4, [[REG2]]
+; CHECK: cmpld {{([0-9]+,)?}}4, 6
+; CHECK: mr [[REG2:[0-9]+]], 6
; CHECK-DAG: std [[REG2]], -[[OFFSET1:[0-9]+]]
; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]]
; CHECK: ld 3, -[[OFFSET2]](1)
@@ -85,8 +84,8 @@
; DARWIN64: mr
; DARWIN64: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]]
-; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]]
+; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
; DARWIN64: ld r3, -[[OFFSET2]]
@@ -106,24 +105,24 @@
}
; CHECK-LABEL: func3:
-; CHECK: ld [[REG3:[0-9]+]], 72(1)
-; CHECK: ld [[REG4:[0-9]+]], 56(1)
-; CHECK: cmpld {{([0-9]+,)?}}[[REG4]], [[REG3]]
-; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1)
+; CHECK: cmpld {{([0-9]+,)?}}4, 6
+; CHECK: mr [[REG3:[0-9]+]], 6
+; CHECK: mr [[REG4:[0-9]+]], 4
; CHECK: std [[REG4]], -[[OFFSET2:[0-9]+]](1)
+; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
; DARWIN32: _func3:
-; DARWIN32: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36
-; DARWIN32: addi r[[REG2:[0-9]+]], r[[REGSP]], 24
-; DARWIN32: lwz r[[REG3:[0-9]+]], 44(r[[REGSP]])
-; DARWIN32: lwz r[[REG4:[0-9]+]], 32(r[[REGSP]])
+; DARWIN32-DAG: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36
+; DARWIN32-DAG: addi r[[REG2:[0-9]+]], r[[REGSP]], 24
+; DARWIN32-DAG: lwz r[[REG3:[0-9]+]], 44(r[[REGSP]])
+; DARWIN32-DAG: lwz r[[REG4:[0-9]+]], 32(r[[REGSP]])
; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
-; DARWIN32: stw r[[REG3]], -[[OFFSET1:[0-9]+]]
-; DARWIN32: stw r[[REG4]], -[[OFFSET2:[0-9]+]]
-; DARWIN32: lwz r3, -[[OFFSET2]]
-; DARWIN32: lwz r3, -[[OFFSET1]]
+; DARWIN32-DAG: stw r[[REG3]], -[[OFFSET1:[0-9]+]]
+; DARWIN32-DAG: stw r[[REG4]], -[[OFFSET2:[0-9]+]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET1]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET2]]
; DARWIN64: _func3:
; DARWIN64: ld r[[REG3:[0-9]+]], 72(r1)
Index: test/CodeGen/PowerPC/complex-return.ll
===================================================================
--- test/CodeGen/PowerPC/complex-return.ll
+++ test/CodeGen/PowerPC/complex-return.ll
@@ -24,10 +24,10 @@
}
; CHECK-LABEL: foo:
-; CHECK: lfd 1
-; CHECK: lfd 2
-; CHECK: lfd 3
-; CHECK: lfd 4
+; CHECK-DAG: lfd 1
+; CHECK-DAG: fmr 2
+; CHECK-DAG: lfd 3
+; CHECK-DAG: lfd 4
define { float, float } @oof() nounwind {
entry:
@@ -50,6 +50,6 @@
}
; CHECK-LABEL: oof:
-; CHECK: lfs 2
-; CHECK: lfs 1
+; CHECK-DAG: lfs 2
+; CHECK-DAG: lfs 1
Index: test/CodeGen/PowerPC/jaggedstructs.ll
===================================================================
--- test/CodeGen/PowerPC/jaggedstructs.ll
+++ test/CodeGen/PowerPC/jaggedstructs.ll
@@ -18,14 +18,14 @@
ret void
}
-; CHECK: std 6, 184(1)
-; CHECK: std 5, 176(1)
-; CHECK: std 4, 168(1)
-; CHECK: std 3, 160(1)
-; CHECK: lbz {{[0-9]+}}, 167(1)
-; CHECK: lhz {{[0-9]+}}, 165(1)
-; CHECK: stb {{[0-9]+}}, 55(1)
-; CHECK: sth {{[0-9]+}}, 53(1)
+; CHECK-DAG: std 3, 160(1)
+; CHECK-DAG: std 6, 184(1)
+; CHECK-DAG: std 5, 176(1)
+; CHECK-DAG: std 4, 168(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 167(1)
+; CHECK-DAG: lhz {{[0-9]+}}, 165(1)
+; CHECK-DAG: stb {{[0-9]+}}, 55(1)
+; CHECK-DAG: sth {{[0-9]+}}, 53(1)
; CHECK: lbz {{[0-9]+}}, 175(1)
; CHECK: lwz {{[0-9]+}}, 171(1)
; CHECK: stb {{[0-9]+}}, 63(1)
Index: test/CodeGen/PowerPC/ppc64-align-long-double.ll
===================================================================
--- test/CodeGen/PowerPC/ppc64-align-long-double.ll
+++ test/CodeGen/PowerPC/ppc64-align-long-double.ll
@@ -18,19 +18,35 @@
ret ppc_fp128 %0
}
-; CHECK-DAG: std 6, 72(1)
-; CHECK-DAG: std 5, 64(1)
-; CHECK-DAG: std 4, 56(1)
-; CHECK-DAG: std 3, 48(1)
-; CHECK: lfd 1, 64(1)
-; CHECK: lfd 2, 72(1)
+;; FIXME: Sadly, we now have an extra store to a temp variable here,
+;; which comes from (roughly):
+;;   store i64 %x, i64* %p
+;;   %f = bitcast (load i64, i64* %p) to f64
+;; The code can now elide the load, leaving (roughly):
+;;   store i64 %x, i64* %p
+;;   %f = bitcast i64 %x to f64
+;; Finally, the bitcast itself turns into a store/load pair.
+;;
+;; This behavior is new: previously, LLVM was accidentally
+;; unable to detect that the load came directly from the store,
+;; and so could not elide it.
-; CHECK-VSX-DAG: std 6, 72(1)
-; CHECK-VSX-DAG: std 5, 64(1)
-; CHECK-VSX-DAG: std 4, 56(1)
-; CHECK-VSX-DAG: std 3, 48(1)
-; CHECK-VSX: li 3, 16
-; CHECK-VSX: addi 4, 1, 48
-; CHECK-VSX: lxsdx 1, 4, 3
-; CHECK-VSX: li 3, 24
-; CHECK-VSX: lxsdx 2, 4, 3
+; CHECK: std 6, 72(1)
+; CHECK: std 5, 64(1)
+; CHECK: std 4, 56(1)
+; CHECK: std 3, 48(1)
+; CHECK: std 5, -16(1)
+; CHECK: std 6, -8(1)
+; CHECK: lfd 1, -16(1)
+; CHECK: lfd 2, -8(1)
+
+; CHECK-VSX: std 6, 72(1)
+; CHECK-VSX: std 5, 64(1)
+; CHECK-VSX: std 4, 56(1)
+; CHECK-VSX: std 3, 48(1)
+; CHECK-VSX: std 5, -16(1)
+; CHECK-VSX: std 6, -8(1)
+; CHECK-VSX: addi 3, 1, -16
+; CHECK-VSX: lxsdx 1, 0, 3
+; CHECK-VSX: addi 3, 1, -8
+; CHECK-VSX: lxsdx 2, 0, 3
Index: test/CodeGen/PowerPC/structsinmem.ll
===================================================================
--- test/CodeGen/PowerPC/structsinmem.ll
+++ test/CodeGen/PowerPC/structsinmem.ll
@@ -113,13 +113,13 @@
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: lha {{[0-9]+}}, 126(1)
-; CHECK: lha {{[0-9]+}}, 132(1)
-; CHECK: lbz {{[0-9]+}}, 119(1)
-; CHECK: lwz {{[0-9]+}}, 140(1)
-; CHECK: lwz {{[0-9]+}}, 144(1)
-; CHECK: lwz {{[0-9]+}}, 152(1)
-; CHECK: lwz {{[0-9]+}}, 160(1)
+; CHECK-DAG: lha {{[0-9]+}}, 126(1)
+; CHECK-DAG: lha {{[0-9]+}}, 132(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 119(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 140(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 144(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 152(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 160(1)
}
define i32 @caller2() nounwind {
@@ -205,11 +205,11 @@
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: lha {{[0-9]+}}, 126(1)
-; CHECK: lha {{[0-9]+}}, 133(1)
-; CHECK: lbz {{[0-9]+}}, 119(1)
-; CHECK: lwz {{[0-9]+}}, 140(1)
-; CHECK: lwz {{[0-9]+}}, 147(1)
-; CHECK: lwz {{[0-9]+}}, 154(1)
-; CHECK: lwz {{[0-9]+}}, 161(1)
+; CHECK-DAG: lha {{[0-9]+}}, 126(1)
+; CHECK-DAG: lha {{[0-9]+}}, 133(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 119(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 140(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 147(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 154(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 161(1)
}
Index: test/CodeGen/PowerPC/structsinregs.ll
===================================================================
--- test/CodeGen/PowerPC/structsinregs.ll
+++ test/CodeGen/PowerPC/structsinregs.ll
@@ -59,6 +59,7 @@
%call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7)
ret i32 %call
+; CHECK-LABEL: caller1
; CHECK: ld 9, 112(31)
; CHECK: ld 8, 120(31)
; CHECK: ld 7, 128(31)
@@ -97,20 +98,21 @@
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: std 9, 96(1)
-; CHECK: std 8, 88(1)
-; CHECK: std 7, 80(1)
-; CHECK: stw 6, 76(1)
-; CHECK: stw 5, 68(1)
-; CHECK: sth 4, 62(1)
-; CHECK: stb 3, 55(1)
-; CHECK: lha {{[0-9]+}}, 62(1) -; CHECK: lha {{[0-9]+}}, 68(1) -; CHECK: lbz {{[0-9]+}}, 55(1) -; CHECK: lwz {{[0-9]+}}, 76(1) -; CHECK: lwz {{[0-9]+}}, 80(1) -; CHECK: lwz {{[0-9]+}}, 88(1) -; CHECK: lwz {{[0-9]+}}, 96(1) +; CHECK-LABEL: callee1 +; CHECK-DAG: std 9, 96(1) +; CHECK-DAG: std 8, 88(1) +; CHECK-DAG: std 7, 80(1) +; CHECK-DAG: stw 6, 76(1) +; CHECK-DAG: stw 5, 68(1) +; CHECK-DAG: sth 4, 62(1) +; CHECK-DAG: stb 3, 55(1) +; CHECK-DAG: lha {{[0-9]+}}, 62(1) +; CHECK-DAG: lha {{[0-9]+}}, 68(1) +; CHECK-DAG: lbz {{[0-9]+}}, 55(1) +; CHECK-DAG: lwz {{[0-9]+}}, 76(1) +; CHECK-DAG: lwz {{[0-9]+}}, 80(1) +; CHECK-DAG: lwz {{[0-9]+}}, 88(1) +; CHECK-DAG: lwz {{[0-9]+}}, 96(1) } define i32 @caller2() nounwind { @@ -139,6 +141,7 @@ %call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7) ret i32 %call +; CHECK-LABEL: caller2 ; CHECK: stb {{[0-9]+}}, 71(1) ; CHECK: sth {{[0-9]+}}, 69(1) ; CHECK: stb {{[0-9]+}}, 87(1) @@ -184,18 +187,19 @@ %add13 = add nsw i32 %add11, %6 ret i32 %add13 -; CHECK: std 9, 96(1) -; CHECK: std 8, 88(1) -; CHECK: std 7, 80(1) -; CHECK: stw 6, 76(1) -; CHECK: std 5, 64(1) -; CHECK: sth 4, 62(1) -; CHECK: stb 3, 55(1) -; CHECK: lha {{[0-9]+}}, 62(1) -; CHECK: lha {{[0-9]+}}, 69(1) -; CHECK: lbz {{[0-9]+}}, 55(1) -; CHECK: lwz {{[0-9]+}}, 76(1) -; CHECK: lwz {{[0-9]+}}, 83(1) -; CHECK: lwz {{[0-9]+}}, 90(1) -; CHECK: lwz {{[0-9]+}}, 97(1) +; CHECK-LABEL: callee2 +; CHECK-DAG: std 9, 96(1) +; CHECK-DAG: std 8, 88(1) +; CHECK-DAG: std 7, 80(1) +; CHECK-DAG: stw 6, 76(1) +; CHECK-DAG: std 5, 64(1) +; CHECK-DAG: sth 4, 62(1) +; CHECK-DAG: stb 3, 55(1) +; CHECK-DAG: lha {{[0-9]+}}, 62(1) +; CHECK-DAG: lha {{[0-9]+}}, 69(1) +; CHECK-DAG: lbz {{[0-9]+}}, 55(1) +; CHECK-DAG: lwz {{[0-9]+}}, 76(1) +; CHECK-DAG: lwz {{[0-9]+}}, 83(1) +; CHECK-DAG: lwz {{[0-9]+}}, 90(1) +; CHECK-DAG: lwz {{[0-9]+}}, 97(1) } Index: test/CodeGen/SystemZ/unaligned-01.ll =================================================================== --- test/CodeGen/SystemZ/unaligned-01.ll +++ test/CodeGen/SystemZ/unaligned-01.ll @@ -1,10 +1,7 @@ ; Check that unaligned accesses are allowed in general. We check the ; few exceptions (like CRL) in their respective test files. ; -; FIXME: -combiner-alias-analysis (the default for SystemZ) stops -; f1 from being optimized. -; RUN: llc < %s -mtriple=s390x-linux-gnu -combiner-alias-analysis=false \ -; RUN: | FileCheck %s +; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s ; Check that these four byte stores become a single word store. define void @f1(i8 *%ptr) { Index: test/CodeGen/Thumb/2010-07-15-debugOrdering.ll =================================================================== --- test/CodeGen/Thumb/2010-07-15-debugOrdering.ll +++ test/CodeGen/Thumb/2010-07-15-debugOrdering.ll @@ -9,9 +9,9 @@ define void @_Z19getClosestDiagonal3ii(%0* noalias sret, i32, i32) nounwind { ; CHECK: bl ___muldf3 -; CHECK: bl ___muldf3 ; CHECK: beq LBB0 ; CHECK: bl ___muldf3 +; CHECK: bl ___muldf3 ;