diff --git a/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
--- a/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
+++ b/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -136,13 +136,9 @@
                                 slpvectorizer::BoUpSLP &R, TargetTransformInfo *TTI);
 
-  /// Try to vectorize trees that start at insertvalue instructions.
-  bool vectorizeInsertValueInst(InsertValueInst *IVI, BasicBlock *BB,
-                                slpvectorizer::BoUpSLP &R);
-
-  /// Try to vectorize trees that start at insertelement instructions.
-  bool vectorizeInsertElementInst(InsertElementInst *IEI, BasicBlock *BB,
-                                  slpvectorizer::BoUpSLP &R);
+  /// Tries to match and vectorize a vector build sequence.
+  bool vectorizeBuildVector(Instruction *I, BasicBlock *BB,
+                            slpvectorizer::BoUpSLP &R);
 
   /// Tries to vectorize constructs started from CmpInst, InsertValueInst or
   /// InsertElementInst instructions.
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -11559,6 +11559,8 @@
                                    SmallVectorImpl<Value *> &BuildVectorOpds,
                                    SmallVectorImpl<Value *> &InsertElts,
                                    unsigned OperandOffset) {
+  assert((isa<InsertElementInst, InsertValueInst>(LastInsertInst)) &&
+         "Expected insertelement or insertvalue instruction!");
   do {
     Value *InsertedOperand = LastInsertInst->getOperand(1);
     Optional<unsigned> OperandIndex =
@@ -11597,13 +11599,10 @@
                                TargetTransformInfo *TTI,
                                SmallVectorImpl<Value *> &BuildVectorOpds,
                                SmallVectorImpl<Value *> &InsertElts) {
-
-  assert((isa<InsertElementInst>(LastInsertInst) ||
-          isa<InsertValueInst>(LastInsertInst)) &&
-         "Expected insertelement or insertvalue instruction!");
-
   assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
          "Expected empty result vectors!");
+  if (!isa<InsertElementInst, InsertValueInst>(LastInsertInst))
+    return false;
 
   Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
   if (!AggregateSize)
@@ -11809,36 +11808,42 @@
   return Res;
 }
 
-bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
-                                                 BasicBlock *BB, BoUpSLP &R) {
-  const DataLayout &DL = BB->getModule()->getDataLayout();
-  if (!R.canMapToVector(IVI->getType(), DL))
-    return false;
-
+bool SLPVectorizerPass::vectorizeBuildVector(Instruction *I,
+                                             BasicBlock *BB, BoUpSLP &R) {
   SmallVector<Value *, 16> BuildVectorOpds;
   SmallVector<Value *, 16> BuildVectorInsts;
-  if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
+  if (!findBuildAggregate(I, TTI, BuildVectorOpds, BuildVectorInsts))
     return false;
 
-  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
-  // Aggregate value is unlikely to be processed in vector register.
-  return tryToVectorizeList(BuildVectorOpds, R);
-}
+  bool OpsChanged = false;
+  // Try to find reductions in buildvector sequences.
+  SmallVector<WeakTrackingVH> PostponedInsts;
+  for (Value *Op : BuildVectorOpds)
+    OpsChanged |=
+        vectorizeHorReduction(nullptr, Op, BB, R, TTI, PostponedInsts);
 
-bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
-                                                   BasicBlock *BB, BoUpSLP &R) {
-  SmallVector<Value *, 16> BuildVectorInsts;
-  SmallVector<Value *, 16> BuildVectorOpds;
   SmallVector<int> Mask;
-  if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
-      (llvm::all_of(
-           BuildVectorOpds,
-           [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) &&
-       isFixedVectorShuffle(BuildVectorOpds, Mask)))
-    return false;
-
-  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
-  return tryToVectorizeList(BuildVectorInsts, R);
+  if (isa<InsertValueInst>(I)) {
+    const DataLayout &DL = BB->getModule()->getDataLayout();
+    if (R.canMapToVector(I->getType(), DL)) {
+
+      LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *I << "\n");
+      // Aggregate value is unlikely to be processed in vector register.
+      OpsChanged |= tryToVectorizeList(BuildVectorOpds, R);
+    }
+  } else if (isa<InsertElementInst>(I) &&
+             !(all_of(BuildVectorOpds,
+                      [](Value *V) {
+                        return isa<ExtractElementInst, UndefValue>(V);
+                      }) &&
+               isFixedVectorShuffle(BuildVectorOpds, Mask))) {
+
+    LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *I << "\n");
+    OpsChanged |= tryToVectorizeList(BuildVectorInsts, R);
+  }
+  // Try to vectorize postponed binops where reductions were not found.
+  OpsChanged |= tryToVectorize(PostponedInsts, R);
+  return OpsChanged;
 }
 
 template <typename T>
@@ -11974,15 +11979,11 @@
   for (auto *I : reverse(Instructions)) {
     if (R.isDeleted(I))
       continue;
-    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
-      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
-    } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
-      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
-    } else if (isa<CmpInst>(I)) {
+    if (isa<CmpInst>(I)) {
       PostponedCmps.push_back(I);
      continue;
    }
-    // Try to find reductions in buildvector sequnces.
+    OpsChanged |= vectorizeBuildVector(I, BB, R);
     OpsChanged |= vectorizeRootInstruction(nullptr, I, BB, R, TTI);
   }
   if (AtTerminator) {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
@@ -7,105 +7,26 @@
 declare void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double>, <2 x double*>, i32 immarg, <2 x i1>)
 
-define void @test(double* nocapture readonly %arg, double* nocapture readonly %arg1, double* nocapture %arg2) {
-; CHECK-LABEL: @test(
+define void @rdx_feeds_buildvector(double* nocapture readonly %arg, double* nocapture readonly %arg1, double* nocapture %arg2) {
+; CHECK-LABEL: @rdx_feeds_buildvector(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[GEP1_0:%.*]] = getelementptr inbounds double, double* [[ARG:%.*]], i64 1
-; CHECK-NEXT:    [[LD1_0:%.*]] = load double, double* [[GEP1_0]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <8 x double*> poison, double* [[ARG:%.*]], i32 0
+; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <8 x double*> [[TMP0]], <8 x double*> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr double, <8 x double*> [[SHUFFLE]], <8 x i64> <i64 1, i64 3, i64 5, i64 7, i64 9, i64 11, i64 13, i64 15>
 ; CHECK-NEXT:    [[GEP2_0:%.*]] = getelementptr inbounds double, double* [[ARG1:%.*]], i64 16
-; CHECK-NEXT:    [[GEP1_1:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 3
-; CHECK-NEXT:    [[LD1_1:%.*]] = load double, double* [[GEP1_1]], align 8
-; CHECK-NEXT:    [[GEP0_1:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 1
-; CHECK-NEXT:    [[GEP2_1:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 17
-; CHECK-NEXT:    [[GEP1_2:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 5
-; CHECK-NEXT:    [[LD1_2:%.*]] = load double, double* [[GEP1_2]], align 8
-; CHECK-NEXT:    [[GEP0_2:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 2
-; CHECK-NEXT:    [[GEP2_2:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 18
-; CHECK-NEXT:    [[GEP1_3:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 7
-; CHECK-NEXT:    [[LD1_3:%.*]] = load double, double* [[GEP1_3]], align 8
-; CHECK-NEXT:    [[GEP0_3:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 3
-; CHECK-NEXT:    [[GEP2_3:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 19
-; CHECK-NEXT:    [[GEP1_4:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 9
-; CHECK-NEXT:    [[LD1_4:%.*]] = load double, double* [[GEP1_4]], align 8
-; CHECK-NEXT:    [[GEP0_4:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 4
-; CHECK-NEXT:    [[GEP2_4:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 20
-; CHECK-NEXT:    [[GEP1_5:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 11
-; CHECK-NEXT:    [[LD1_5:%.*]] = load double, double* [[GEP1_5]], align 8
-; CHECK-NEXT:    [[GEP0_5:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 5
-; CHECK-NEXT:    [[GEP2_5:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 21
-; CHECK-NEXT:    [[GEP1_6:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 13
-; CHECK-NEXT:    [[LD1_6:%.*]] = load double, double* [[GEP1_6]], align 8
-; CHECK-NEXT:    [[GEP0_6:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 6
-; CHECK-NEXT:    [[GEP2_6:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 22
-; CHECK-NEXT:    [[GEP1_7:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 15
-; CHECK-NEXT:    [[LD1_7:%.*]] = load double, double* [[GEP1_7]], align 8
-; CHECK-NEXT:    [[GEP0_7:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 7
-; CHECK-NEXT:    [[GEP2_7:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 23
-; CHECK-NEXT:    [[LD0_0:%.*]] = load double, double* [[ARG1]], align 8
-; CHECK-NEXT:    [[LD2_0:%.*]] = load double, double* [[GEP2_0]], align 8
-; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[LD0_0]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[LD2_0]], i32 1
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> poison, double [[LD1_0]], i32 0
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[LD1_0]], i32 1
-; CHECK-NEXT:    [[TMP4:%.*]] = fmul fast <2 x double> [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    [[LD0_1:%.*]] = load double, double* [[GEP0_1]], align 8
-; CHECK-NEXT:    [[LD2_1:%.*]] = load double, double* [[GEP2_1]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> poison, double [[LD0_1]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x double> [[TMP5]], double [[LD2_1]], i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <2 x double> poison, double [[LD1_1]], i32 0
-; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[LD1_1]], i32 1
-; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast <2 x double> [[TMP6]], [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = fadd fast <2 x double> [[TMP4]], [[TMP9]]
-; CHECK-NEXT:    [[LD0_2:%.*]] = load double, double* [[GEP0_2]], align 8
-; CHECK-NEXT:    [[LD2_2:%.*]] = load double, double* [[GEP2_2]], align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x double> poison, double [[LD0_2]], i32 0
-; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <2 x double> [[TMP11]], double [[LD2_2]], i32 1
-; CHECK-NEXT:    [[TMP13:%.*]] = insertelement <2 x double> poison, double [[LD1_2]], i32 0
-; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <2 x double> [[TMP13]], double [[LD1_2]], i32 1
-; CHECK-NEXT:    [[TMP15:%.*]] = fmul fast <2 x double> [[TMP12]], [[TMP14]]
-; CHECK-NEXT:    [[TMP16:%.*]] = fadd fast <2 x double> [[TMP10]], [[TMP15]]
-; CHECK-NEXT:    [[LD0_3:%.*]] = load double, double* [[GEP0_3]], align 8
-; CHECK-NEXT:    [[LD2_3:%.*]] = load double, double* [[GEP2_3]], align 8
-; CHECK-NEXT:    [[TMP17:%.*]] = insertelement <2 x double> poison, double [[LD0_3]], i32 0
-; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <2 x double> [[TMP17]], double [[LD2_3]], i32 1
-; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <2 x double> poison, double [[LD1_3]], i32 0
-; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <2 x double> [[TMP19]], double [[LD1_3]], i32 1
-; CHECK-NEXT:    [[TMP21:%.*]] = fmul fast <2 x double> [[TMP18]], [[TMP20]]
-; CHECK-NEXT:    [[TMP22:%.*]] = fadd fast <2 x double> [[TMP16]], [[TMP21]]
-; CHECK-NEXT:    [[LD0_4:%.*]] = load double, double* [[GEP0_4]], align 8
-; CHECK-NEXT:    [[LD2_4:%.*]] = load double, double* [[GEP2_4]], align 8
-; CHECK-NEXT:    [[TMP23:%.*]] = insertelement <2 x double> poison, double [[LD0_4]], i32 0
-; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <2 x double> [[TMP23]], double [[LD2_4]], i32 1
-; CHECK-NEXT:    [[TMP25:%.*]] = insertelement <2 x double> poison, double [[LD1_4]], i32 0
-; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <2 x double> [[TMP25]], double [[LD1_4]], i32 1
-; CHECK-NEXT:    [[TMP27:%.*]] = fmul fast <2 x double> [[TMP24]], [[TMP26]]
-; CHECK-NEXT:    [[TMP28:%.*]] = fadd fast <2 x double> [[TMP22]], [[TMP27]]
-; CHECK-NEXT:    [[LD0_5:%.*]] = load double, double* [[GEP0_5]], align 8
-; CHECK-NEXT:    [[LD2_5:%.*]] = load double, double* [[GEP2_5]], align 8
-; CHECK-NEXT:    [[TMP29:%.*]] = insertelement <2 x double> poison, double [[LD0_5]], i32 0
-; CHECK-NEXT:    [[TMP30:%.*]] = insertelement <2 x double> [[TMP29]], double [[LD2_5]], i32 1
-; CHECK-NEXT:    [[TMP31:%.*]] = insertelement <2 x double> poison, double [[LD1_5]], i32 0
-; CHECK-NEXT:    [[TMP32:%.*]] = insertelement <2 x double> [[TMP31]], double [[LD1_5]], i32 1
-; CHECK-NEXT:    [[TMP33:%.*]] = fmul fast <2 x double> [[TMP30]], [[TMP32]]
-; CHECK-NEXT:    [[TMP34:%.*]] = fadd fast <2 x double> [[TMP28]], [[TMP33]]
-; CHECK-NEXT:    [[LD0_6:%.*]] = load double, double* [[GEP0_6]], align 8
-; CHECK-NEXT:    [[LD2_6:%.*]] = load double, double* [[GEP2_6]], align 8
-; CHECK-NEXT:    [[TMP35:%.*]] = insertelement <2 x double> poison, double [[LD0_6]], i32 0
-; CHECK-NEXT:    [[TMP36:%.*]] = insertelement <2 x double> [[TMP35]], double [[LD2_6]], i32 1
-; CHECK-NEXT:    [[TMP37:%.*]] = insertelement <2 x double> poison, double [[LD1_6]], i32 0
-; CHECK-NEXT:    [[TMP38:%.*]] = insertelement <2 x double> [[TMP37]], double [[LD1_6]], i32 1
-; CHECK-NEXT:    [[TMP39:%.*]] = fmul fast <2 x double> [[TMP36]], [[TMP38]]
-; CHECK-NEXT:    [[TMP40:%.*]] = fadd fast <2 x double> [[TMP34]], [[TMP39]]
-; CHECK-NEXT:    [[LD0_7:%.*]] = load double, double* [[GEP0_7]], align 8
-; CHECK-NEXT:    [[LD2_7:%.*]] = load double, double* [[GEP2_7]], align 8
-; CHECK-NEXT:    [[TMP41:%.*]] = insertelement <2 x double> poison, double [[LD0_7]], i32 0
-; CHECK-NEXT:    [[TMP42:%.*]] = insertelement <2 x double> [[TMP41]], double [[LD2_7]], i32 1
-; CHECK-NEXT:    [[TMP43:%.*]] = insertelement <2 x double> poison, double [[LD1_7]], i32 0
-; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <2 x double> [[TMP43]], double [[LD1_7]], i32 1
-; CHECK-NEXT:    [[TMP45:%.*]] = fmul fast <2 x double> [[TMP42]], [[TMP44]]
-; CHECK-NEXT:    [[TMP46:%.*]] = fadd fast <2 x double> [[TMP40]], [[TMP45]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> [[TMP1]], i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x double> undef)
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[ARG1]] to <8 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x double>, <8 x double>* [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <8 x double> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> [[TMP5]])
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast double* [[GEP2_0]] to <8 x double>*
+; CHECK-NEXT:    [[TMP8:%.*]] = load <8 x double>, <8 x double>* [[TMP7]], align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast <8 x double> [[TMP8]], [[TMP2]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> [[TMP9]])
+; CHECK-NEXT:    [[I142:%.*]] = insertelement <2 x double> poison, double [[TMP6]], i64 0
+; CHECK-NEXT:    [[I143:%.*]] = insertelement <2 x double> [[I142]], double [[TMP10]], i64 1
 ; CHECK-NEXT:    [[P:%.*]] = getelementptr inbounds double, double* [[ARG2:%.*]], <2 x i64> <i64 0, i64 16>
-; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[TMP46]], <2 x double*> [[P]], i32 8, <2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[I143]], <2 x double*> [[P]], i32 8, <2 x i1> <i1 true, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -192,3 +113,74 @@
   call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %i143, <2 x double*> %p, i32 8, <2 x i1> <i1 true, i1 true>)
   ret void
 }
+
+; In this test, the reduction feeds a single insertelement instruction.
+define void @rdx_feeds_single_insert(<2 x double> %v, double* nocapture readonly %arg, double* nocapture readonly %arg1, double* nocapture %arg2) {
+; CHECK-LABEL: @rdx_feeds_single_insert(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <8 x double*> poison, double* [[ARG:%.*]], i32 0
+; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <8 x double*> [[TMP0]], <8 x double*> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr double, <8 x double*> [[SHUFFLE]], <8 x i64> <i64 1, i64 3, i64 5, i64 7, i64 9, i64 11, i64 13, i64 15>
+; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> [[TMP1]], i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x double> undef)
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[ARG1:%.*]] to <8 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x double>, <8 x double>* [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <8 x double> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> [[TMP5]])
+; CHECK-NEXT:    [[I:%.*]] = insertelement <2 x double> [[V:%.*]], double [[TMP6]], i64 1
+; CHECK-NEXT:    [[P:%.*]] = getelementptr inbounds double, double* [[ARG2:%.*]], <2 x i64> <i64 0, i64 16>
+; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[I]], <2 x double*> [[P]], i32 8, <2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    ret void
+;
+entry:
+  %gep1.0 = getelementptr inbounds double, double* %arg, i64 1
+  %ld1.0 = load double, double* %gep1.0, align 8
+  %ld0.0 = load double, double* %arg1, align 8
+  %mul1.0 = fmul fast double %ld0.0, %ld1.0
+  %gep1.1 = getelementptr inbounds double, double* %arg, i64 3
+  %ld1.1 = load double, double* %gep1.1, align 8
+  %gep0.1 = getelementptr inbounds double, double* %arg1, i64 1
+  %ld0.1 = load double, double* %gep0.1, align 8
+  %mul1.1 = fmul fast double %ld0.1, %ld1.1
+  %rdx1.0 = fadd fast double %mul1.0, %mul1.1
+  %gep1.2 = getelementptr inbounds double, double* %arg, i64 5
+  %ld1.2 = load double, double* %gep1.2, align 8
+  %gep0.2 = getelementptr inbounds double, double* %arg1, i64 2
+  %ld0.2 = load double, double* %gep0.2, align 8
+  %mul1.2 = fmul fast double %ld0.2, %ld1.2
+  %rdx1.1 = fadd fast double %rdx1.0, %mul1.2
+  %gep1.3 = getelementptr inbounds double, double* %arg, i64 7
+  %ld1.3 = load double, double* %gep1.3, align 8
+  %gep0.3 = getelementptr inbounds double, double* %arg1, i64 3
+  %ld0.3 = load double, double* %gep0.3, align 8
+  %mul1.3 = fmul fast double %ld0.3, %ld1.3
+  %rdx1.2 = fadd fast double %rdx1.1, %mul1.3
+  %gep1.4 = getelementptr inbounds double, double* %arg, i64 9
+  %ld1.4 = load double, double* %gep1.4, align 8
+  %gep0.4 = getelementptr inbounds double, double* %arg1, i64 4
+  %ld0.4 = load double, double* %gep0.4, align 8
+  %mul1.4 = fmul fast double %ld0.4, %ld1.4
+  %rdx1.3 = fadd fast double %rdx1.2, %mul1.4
+  %gep1.5 = getelementptr inbounds double, double* %arg, i64 11
+  %ld1.5 = load double, double* %gep1.5, align 8
+  %gep0.5 = getelementptr inbounds double, double* %arg1, i64 5
+  %ld0.5 = load double, double* %gep0.5, align 8
+  %mul1.5 = fmul fast double %ld0.5, %ld1.5
+  %rdx1.4 = fadd fast double %rdx1.3, %mul1.5
+  %gep1.6 = getelementptr inbounds double, double* %arg, i64 13
+  %ld1.6 = load double, double* %gep1.6, align 8
+  %gep0.6 = getelementptr inbounds double, double* %arg1, i64 6
+  %ld0.6 = load double, double* %gep0.6, align 8
+  %mul1.6 = fmul fast double %ld0.6, %ld1.6
+  %rdx1.5 = fadd fast double %rdx1.4, %mul1.6
+  %gep1.7 = getelementptr inbounds double, double* %arg, i64 15
+  %ld1.7 = load double, double* %gep1.7, align 8
+  %gep0.7 = getelementptr inbounds double, double* %arg1, i64 7
+  %ld0.7 = load double, double* %gep0.7, align 8
+  %mul1.7 = fmul fast double %ld0.7, %ld1.7
+  %rdx1 = fadd fast double %rdx1.5, %mul1.7
+  %i = insertelement <2 x double> %v, double %rdx1, i64 1
+  %p = getelementptr inbounds double, double* %arg2, <2 x i64> <i64 0, i64 16>
+  call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %i, <2 x double*> %p, i32 8, <2 x i1> <i1 true, i1 true>)
+  ret void
+}
+
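Note for reviewers: the input shape the new vectorizeBuildVector entry point targets is an insertelement (or insertvalue) chain whose operands are themselves scalar horizontal-reduction chains. A minimal sketch of that shape in LLVM IR, distilled from the tests above (the function and value names here are illustrative only, not taken from the patch or its tests):

; Two scalar fadd reduction chains whose results build a <2 x double>.
; vectorizeBuildVector first runs vectorizeHorReduction over each
; buildvector operand (%ra, %rb here), collecting postponed binops, and
; only then attempts the insertelement chain itself, so the reductions
; can become @llvm.vector.reduce.fadd calls before the build is matched.
define <2 x double> @buildvector_of_reductions(double %a0, double %a1, double %a2, double %a3, double %b0, double %b1, double %b2, double %b3) {
entry:
  %s0 = fadd fast double %a0, %a1
  %s1 = fadd fast double %s0, %a2
  %ra = fadd fast double %s1, %a3
  %t0 = fadd fast double %b0, %b1
  %t1 = fadd fast double %t0, %b2
  %rb = fadd fast double %t1, %b3
  %i0 = insertelement <2 x double> poison, double %ra, i64 0
  %i1 = insertelement <2 x double> %i0, double %rb, i64 1
  ret <2 x double> %i1
}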