Index: lib/CodeGen/CodeGenPrepare.cpp =================================================================== --- lib/CodeGen/CodeGenPrepare.cpp +++ lib/CodeGen/CodeGenPrepare.cpp @@ -5263,6 +5263,107 @@ return false; } +/// For the instruction sequence of store below, F and I values +/// are bundled together as an i64 value before being stored into memory. +/// Sometimes it is more efficient to generate separate stores for F and I, +/// which can remove the bitwise instructions or sink them to colder places. +/// +/// (store (or (zext (bitcast F to i32) to i64), +/// (shl (zext I to i64), 32)), addr) --> +/// (store F, addr) and (store I, addr+4) +/// +/// Similarly, splitting for other merged store can also be beneficial, like: +/// For pair of {i32, i32}, i64 store --> two i32 stores. +/// For pair of {i32, i16}, i64 store --> two i32 stores. +/// For pair of {i16, i16}, i32 store --> two i16 stores. +/// For pair of {i16, i8}, i32 store --> two i16 stores. +/// For pair of {i8, i8}, i16 store --> two i8 stores. +/// +/// We allow each target to determine specifically which kind of splitting is +/// supported. +/// +/// The store patterns are commonly seen from the simple code snippet below +/// if only std::make_pair(...) is sroa transformed before inlined into hoo. +/// void goo(const std::pair<int, float> &); +/// hoo() { +/// ... +/// goo(std::make_pair(tmp, ftmp)); +/// ... +/// } +/// +/// Although we already have similar splitting in DAG Combine, we duplicate +/// it in CodeGenPrepare to catch the case in which pattern is across +/// multiple BBs. The logic in DAG Combine is kept to catch case generated +/// during code expansion. 
+static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, + const TargetLowering &TLI) { + unsigned HalfValBitSize = + DL.getTypeSizeInBits(SI.getValueOperand()->getType()) / 2; + + // Match the following patterns: + // (store (or (zext LValue to i64), + // (shl (zext HValue to i64), 32)), HalfValBitSize) + // or + // (store (or (shl (zext HValue to i64), 32)), HalfValBitSize) + // (zext LValue to i64), + // Expect both operands of OR and the first operand of SHL have only + // one use. + Value *LValue, *HValue; + if (!match(SI.getValueOperand(), + m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))), + m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))), + m_SpecificInt(HalfValBitSize)))))) + return false; + + // Check LValue and HValue are int with size less or equal than 32. + if (!LValue->getType()->isIntegerTy() || + DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize || + !HValue->getType()->isIntegerTy() || + DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize) + return false; + + // If LValue/HValue is a bitcast instruction, use the EVT before bitcast + // as the input of target query. + auto *LBC = dyn_cast<BitCastInst>(LValue); + auto *HBC = dyn_cast<BitCastInst>(HValue); + EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType()) + : EVT::getEVT(LValue->getType()); + EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType()) + : EVT::getEVT(HValue->getType()); + if (!TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) + return false; + + // Start to split store. + IRBuilder<> Builder(SI.getContext()); + Builder.SetInsertPoint(&SI); + + // If LValue/HValue is a bitcast in another BB, create a new one in current + // BB so it may be merged with the split stores by dag combiner. 
+ if (LBC && LBC->getParent() != SI.getParent()) + LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); + if (HBC && HBC->getParent() != SI.getParent()) + HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); + + auto CreateSplitStore = [&](Value *V, bool Upper) { + Type *Ty = Type::getIntNTy(SI.getContext(), HalfValBitSize); + V = Builder.CreateZExtOrBitCast(V, Ty); + Value *Addr = Builder.CreateBitCast( + SI.getOperand(1), Ty->getPointerTo(SI.getPointerAddressSpace())); + if (Upper) + Addr = Builder.CreateGEP( + Ty, Addr, ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); + Builder.CreateAlignedStore(V, Addr, Upper ? SI.getAlignment() / 2 + : SI.getAlignment()); + }; + + CreateSplitStore(LValue, false); + CreateSplitStore(HValue, true); + + // Delete the old store. + SI.eraseFromParent(); + return true; +} + bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) { // Bail out if we inserted the instruction to prevent optimizations from // stepping on each other's toes. 
@@ -5327,6 +5428,8 @@ } if (StoreInst *SI = dyn_cast<StoreInst>(I)) { + if (TLI && splitMergedValStore(*SI, *DL, *TLI)) + return true; stripInvariantGroupMetadata(*SI); if (TLI) { unsigned AS = SI->getPointerAddressSpace(); Index: test/CodeGen/X86/split-store.ll =================================================================== --- test/CodeGen/X86/split-store.ll +++ test/CodeGen/X86/split-store.ll @@ -14,6 +14,21 @@ ret void } +; CHECK-LABEL: int31_float_pair +; CHECK: andl $2147483647, %edi +; CHECK: movl %edi, (%rsi) +; CHECK: movss %xmm0, 4(%rsi) +define void @int31_float_pair(i31 %tmp1, float %tmp2, i64* %ref.tmp) { +entry: + %t0 = bitcast float %tmp2 to i32 + %t1 = zext i32 %t0 to i64 + %t2 = shl nuw i64 %t1, 32 + %t3 = zext i31 %tmp1 to i64 + %t4 = or i64 %t2, %t3 + store i64 %t4, i64* %ref.tmp, align 8 + ret void +} + ; CHECK-LABEL: float_int32_pair ; CHECK: movss %xmm0, (%rsi) ; CHECK: movl %edi, 4(%rsi) @@ -43,6 +58,21 @@ ret void } + +; CHECK-LABEL: int15_float_pair +; CHECK: andl $32767, %edi +; CHECK: movl %edi, (%rsi) +; CHECK: movss %xmm0, 4(%rsi) +define void @int15_float_pair(i15 signext %tmp1, float %tmp2, i64* %ref.tmp) { +entry: + %t0 = bitcast float %tmp2 to i32 + %t1 = zext i32 %t0 to i64 + %t2 = shl nuw i64 %t1, 32 + %t3 = zext i15 %tmp1 to i64 + %t4 = or i64 %t2, %t3 + store i64 %t4, i64* %ref.tmp, align 8 + ret void +} + ; CHECK-LABEL: int8_float_pair ; CHECK: movzbl %dil, %eax ; CHECK: movl %eax, (%rsi) @@ -57,3 +87,59 @@ store i64 %t4, i64* %ref.tmp, align 8 ret void } + +; CHECK-LABEL: int7_float_pair +; CHECK: andb $127, %dil +; CHECK: movzbl %dil, %eax +; CHECK: movl %eax, (%rsi) +; CHECK: movss %xmm0, 4(%rsi) +define void @int7_float_pair(i7 signext %tmp1, float %tmp2, i64* %ref.tmp) { +entry: + %t0 = bitcast float %tmp2 to i32 + %t1 = zext i32 %t0 to i64 + %t2 = shl nuw i64 %t1, 32 + %t3 = zext i7 %tmp1 to i64 + %t4 = or i64 %t2, %t3 + store i64 %t4, i64* %ref.tmp, align 8 + ret void +} + +; CHECK-LABEL: mbb_int32_float_pair +; CHECK: movl %edi, 
(%rsi) +; CHECK: movss %xmm0, 4(%rsi) +define void @mbb_int32_float_pair(i32 %tmp1, float %tmp2, i64* %ref.tmp) { +entry: + %t0 = bitcast float %tmp2 to i32 + br label %next +next: + %t1 = zext i32 %t0 to i64 + %t2 = shl nuw i64 %t1, 32 + %t3 = zext i32 %tmp1 to i64 + %t4 = or i64 %t2, %t3 + store i64 %t4, i64* %ref.tmp, align 8 + ret void +} + +; CHECK-LABEL: mbb_int32_float_multi_stores +; CHECK: movl %edi, (%rsi) +; CHECK: movss %xmm0, 4(%rsi) +; CHECK: # %bb2 +; CHECK: movl %edi, (%rdx) +; CHECK: movss %xmm0, 4(%rdx) +define void @mbb_int32_float_multi_stores(i32 %tmp1, float %tmp2, i64* %ref.tmp, i64* %ref.tmp1, i1 %cmp) { +entry: + %t0 = bitcast float %tmp2 to i32 + br label %bb1 +bb1: + %t1 = zext i32 %t0 to i64 + %t2 = shl nuw i64 %t1, 32 + %t3 = zext i32 %tmp1 to i64 + %t4 = or i64 %t2, %t3 + store i64 %t4, i64* %ref.tmp, align 8 + br i1 %cmp, label %bb2, label %exitbb +bb2: + store i64 %t4, i64* %ref.tmp1, align 8 + br label %exitbb +exitbb: + ret void +}