Index: lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- lib/CodeGen/CodeGenPrepare.cpp
+++ lib/CodeGen/CodeGenPrepare.cpp
@@ -5263,6 +5263,110 @@
   return false;
 }
 
+/// For the instruction sequence of store below, F and I values
+/// are bundled together as an i64 value before being stored into memory.
+/// Sometimes it is more efficient to generate separate stores for F and I,
+/// which can remove the bitwise instructions or sink them to colder places.
+///
+///   (store (or (zext (bitcast F to i32) to i64),
+///              (shl (zext I to i64), 32)), addr)  -->
+///   (store F, addr) and (store I, addr+4)
+///
+/// Similarly, splitting for other merged store can also be beneficial, like:
+/// For pair of {i32, i32}, i64 store --> two i32 stores.
+/// For pair of {i32, i16}, i64 store --> two i32 stores.
+/// For pair of {i16, i16}, i32 store --> two i16 stores.
+/// For pair of {i16, i8},  i32 store --> two i16 stores.
+/// For pair of {i8, i8},   i16 store --> two i8 stores.
+///
+/// We allow each target to determine specifically which kind of splitting is
+/// supported.
+///
+/// The store patterns are commonly seen from the simple code snippet below
+/// if only std::make_pair(...) is sroa transformed before inlined into hoo.
+///   void goo(const std::pair<int, float> &);
+///   hoo() {
+///     ...
+///     goo(std::make_pair(tmp, ftmp));
+///     ...
+///   }
+///
+/// Although we already have similar splitting in DAG Combine, we duplicate
+/// it in CodeGenPrepare to catch the case in which pattern is across
+/// multiple BBs. The logic in DAG Combine is kept to catch case generated
+/// during code expansion.
+static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
+                                const TargetLowering &TLI) {
+  // The merged value is twice as wide as each half being stored.
+  unsigned HalfValBitSize =
+      DL.getTypeSizeInBits(SI.getValueOperand()->getType()) / 2;
+
+  // Match the following patterns:
+  // (store (or (zext LValue to i64),
+  //            (shl (zext HValue to i64), 32)), HalfValBitSize)
+  // or
+  // (store (or (shl (zext HValue to i64), 32)), HalfValBitSize)
+  //            (zext LValue to i64),
+  // Expect both operands of OR and the first operand of SHL have only
+  // one use.
+  Value *LValue = nullptr;
+  Value *HValue = nullptr;
+  if (!match(SI.getValueOperand(),
+             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
+                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
+                                   m_SpecificInt(HalfValBitSize))))))
+    return false;
+
+  // Check LValue and HValue are int with size less or equal than
+  // HalfValBitSize.
+  if (!LValue->getType()->isIntegerTy() ||
+      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
+      !HValue->getType()->isIntegerTy() ||
+      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
+    return false;
+
+  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
+  // as the input of target query.
+  EVT LowTy = EVT::getEVT(LValue->getType());
+  EVT HighTy = EVT::getEVT(HValue->getType());
+  if (BitCastInst *BC = dyn_cast<BitCastInst>(LValue))
+    LowTy = EVT::getEVT(BC->getOperand(0)->getType(), true);
+  if (BitCastInst *BC = dyn_cast<BitCastInst>(HValue))
+    HighTy = EVT::getEVT(BC->getOperand(0)->getType(), true);
+
+  // Let the target decide whether two narrow stores beat the bit merging.
+  if (!TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
+    return false;
+
+  // Start to split store.
+  IRBuilder<> Builder(SI.getContext());
+  Builder.SetInsertPoint(&SI);
+
+  // If LValue/HValue is a bitcast in another BB and has only one use, move
+  // it to current BB so it may be merged with the splitted stores by dag
+  // combiner.
+  BitCastInst *LBC = dyn_cast<BitCastInst>(LValue);
+  BitCastInst *HBC = dyn_cast<BitCastInst>(HValue);
+  if (LBC && LBC->hasOneUse() && LBC->getParent() != SI.getParent())
+    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
+  if (HBC && HBC->hasOneUse() && HBC->getParent() != SI.getParent())
+    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
+
+  Type *Ty = Type::getIntNTy(SI.getContext(), HalfValBitSize);
+  Type *PtrTy = Ty->getPointerTo(SI.getPointerAddressSpace());
+  // Create store to the lower half.
+  Value *Low = Builder.CreateZExtOrBitCast(LValue, Ty);
+  Value *LowAddr = Builder.CreateBitCast(SI.getOperand(1), PtrTy);
+  Builder.CreateAlignedStore(Low, LowAddr, SI.getAlignment());
+
+  // Create store to the upper half, at LowAddr + 1 element of the half type.
+  Value *High = Builder.CreateZExtOrBitCast(HValue, Ty);
+  Value *HighAddr = Builder.CreateGEP(
+      Ty, LowAddr, ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
+  // NOTE(review): upper-half store alignment is halved relative to the
+  // original store because its address is offset by HalfValBitSize/8 bytes.
+  Builder.CreateAlignedStore(High, HighAddr, SI.getAlignment() / 2);
+
+  // Delete the old store and the bitwise instructions generating int64.
+  SI.eraseFromParent();
+  return true;
+}
+
 bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) {
   // Bail out if we inserted the instruction to prevent optimizations from
   // stepping on each other's toes.
@@ -5327,6 +5431,8 @@
   }
 
   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
+      return true;
     stripInvariantGroupMetadata(*SI);
     if (TLI) {
       unsigned AS = SI->getPointerAddressSpace();
Index: test/CodeGen/X86/split-store.ll
===================================================================
--- test/CodeGen/X86/split-store.ll
+++ test/CodeGen/X86/split-store.ll
@@ -57,3 +57,43 @@
   store i64 %t4, i64* %ref.tmp, align 8
   ret void
 }
+
+; Splitting must also fire when the bitcast feeding the merge lives in a
+; different basic block than the store (the CodeGenPrepare-only case).
+; CHECK-LABEL: mbb_int32_float_pair
+; CHECK: movl %edi, (%rsi)
+; CHECK: movss %xmm0, 4(%rsi)
+define void @mbb_int32_float_pair(i32 %tmp1, float %tmp2, i64* %ref.tmp) {
+entry:
+  %t0 = bitcast float %tmp2 to i32
+  br label %next
+next:
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl nuw i64 %t1, 32
+  %t3 = zext i32 %tmp1 to i64
+  %t4 = or i64 %t2, %t3
+  store i64 %t4, i64* %ref.tmp, align 8
+  ret void
+}
+
+; The same merged value stored twice: both stores should be split.
+; CHECK-LABEL: mbb_int32_float_multi_stores
+; CHECK: movl %edi, (%rsi)
+; CHECK: movss %xmm0, 4(%rsi)
+; CHECK: # %bb2
+; CHECK: movl %edi, (%rdx)
+; CHECK: movss %xmm0, 4(%rdx)
+define void @mbb_int32_float_multi_stores(i32 %tmp1, float %tmp2, i64* %ref.tmp, i64* %ref.tmp1, i1 %cmp) {
+entry:
+  %t0 = bitcast float %tmp2 to i32
+  br label %bb1
+bb1:
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl nuw i64 %t1, 32
+  %t3 = zext i32 %tmp1 to i64
+  %t4 = or i64 %t2, %t3
+  store i64 %t4, i64* %ref.tmp, align 8
+  br i1 %cmp, label %bb2, label %exitbb
+bb2:
+  store i64 %t4, i64* %ref.tmp1, align 8
+  br label %exitbb
+exitbb:
+  ret void
+}