Index: llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -917,6 +917,32 @@
     /// Clears the data.
     void clear() { OpsVec.clear(); }
 
+    /// \returns true if there are enough operands identical to \p Op to fill
+    /// the whole vector.
+    /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
+    bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
+      bool OpAPO = getData(OpIdx, Lane).APO;
+      for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
+        if (Ln == Lane)
+          continue;
+        // This is set to true if we found a candidate for broadcast at Lane.
+        bool FoundCandidate = false;
+        for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
+          OperandData &Data = getData(OpI, Ln);
+          if (Data.APO != OpAPO || Data.IsUsed)
+            continue;
+          if (Data.V == Op) {
+            FoundCandidate = true;
+            Data.IsUsed = true;
+            break;
+          }
+        }
+        if (!FoundCandidate)
+          return false;
+      }
+      return true;
+    }
+
   public:
     /// Initialize with all the operands of the instruction vector \p RootVL.
     VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
@@ -971,8 +997,13 @@
         // side.
         if (isa<LoadInst>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Load;
-        else if (isa<Instruction>(OpLane0))
-          ReorderingModes[OpIdx] = ReorderingMode::Opcode;
+        else if (isa<Instruction>(OpLane0)) {
+          // Check if OpLane0 should be broadcast.
+          if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
+            ReorderingModes[OpIdx] = ReorderingMode::Splat;
+          else
+            ReorderingModes[OpIdx] = ReorderingMode::Opcode;
+        }
         else if (isa<Constant>(OpLane0))
           ReorderingModes[OpIdx] = ReorderingMode::Constant;
         else if (isa<Argument>(OpLane0))
@@ -990,9 +1021,8 @@
       for (int Pass = 0; Pass != 2; ++Pass) {
         // Skip the second pass if the first pass did not fail.
         bool StrategyFailed = false;
-        // Mark the operand data as free to use for all but the first pass.
-        if (Pass > 0)
-          clearUsed();
+        // Mark all operand data as free to use.
+        clearUsed();
         // We keep the original operand order for the FirstLane, so reorder the
         // rest of the lanes. We are visiting the nodes in a circular fashion,
         // using FirstLane as the center point and increasing the radius
Index: llvm/trunk/test/Transforms/SLPVectorizer/X86/broadcast.ll
===================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/broadcast.ll
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/broadcast.ll
@@ -7,29 +7,31 @@
 ; S[2] = %v2 + %v1
 ; S[3] = %v1 + %v2
 ;
-; TODO: We should broadcast %v1 and %v2
+; We broadcast %v1 and %v2
 ;
+
 define void @bcast_vals(i64 *%A, i64 *%B, i64 *%S) {
 ; CHECK-LABEL: @bcast_vals(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A0:%.*]] = load i64, i64* [[A:%.*]], align 8
 ; CHECK-NEXT:    [[B0:%.*]] = load i64, i64* [[B:%.*]], align 8
-; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[A0]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[B0]], i32 1
-; CHECK-NEXT:    [[TMP2:%.*]] = sub <2 x i64> [[TMP1]],
-; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> undef, <4 x i32>
-; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x i64> [[SHUFFLE]], i32 1
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> undef, i64 [[TMP3]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <4 x i64> [[SHUFFLE]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP5]], i32 1
-; CHECK-NEXT:    [[SHUFFLE1:%.*]] = shufflevector <2 x i64> [[TMP6]], <2 x i64> undef, <4 x i32>
-; CHECK-NEXT:    [[TMP7:%.*]] = add <4 x i64> [[SHUFFLE]], [[SHUFFLE1]]
+; CHECK-NEXT:    [[V1:%.*]] = sub i64 [[A0]], 1
+; CHECK-NEXT:    [[V2:%.*]] = sub i64 [[B0]], 1
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x i64> undef, i64 [[V1]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i64> [[TMP0]], i64 [[V1]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i64> [[TMP1]], i64 [[V1]], i32 2
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i64> [[TMP2]], i64 [[V1]], i32 3
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i64> undef, i64 [[V2]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i64> [[TMP4]], i64 [[V2]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i64> [[TMP5]], i64 [[V2]], i32 2
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i64> [[TMP6]], i64 [[V2]], i32 3
+; CHECK-NEXT:    [[TMP8:%.*]] = add <4 x i64> [[TMP3]], [[TMP7]]
 ; CHECK-NEXT:    [[IDXS0:%.*]] = getelementptr inbounds i64, i64* [[S:%.*]], i64 0
 ; CHECK-NEXT:    [[IDXS1:%.*]] = getelementptr inbounds i64, i64* [[S]], i64 1
 ; CHECK-NEXT:    [[IDXS2:%.*]] = getelementptr inbounds i64, i64* [[S]], i64 2
 ; CHECK-NEXT:    [[IDXS3:%.*]] = getelementptr inbounds i64, i64* [[S]], i64 3
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i64* [[IDXS0]] to <4 x i64>*
-; CHECK-NEXT:    store <4 x i64> [[TMP7]], <4 x i64>* [[TMP8]], align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i64* [[IDXS0]] to <4 x i64>*
+; CHECK-NEXT:    store <4 x i64> [[TMP8]], <4 x i64>* [[TMP9]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -61,7 +63,8 @@
 ; S[2] = %v5 + %v1
 ; S[3] = %v1 + %v4
 ;
-; TODO: We should broadcast %v1.
+; We broadcast %v1.
+
 ;
 define void @bcast_vals2(i16 *%A, i16 *%B, i16 *%C, i16 *%D, i16 *%E, i32 *%S) {
 ; CHECK-LABEL: @bcast_vals2(
@@ -72,25 +75,22 @@
 ; CHECK-NEXT:    [[D0:%.*]] = load i16, i16* [[D:%.*]], align 8
 ; CHECK-NEXT:    [[E0:%.*]] = load i16, i16* [[E:%.*]], align 8
 ; CHECK-NEXT:    [[V1:%.*]] = sext i16 [[A0]] to i32
-; CHECK-NEXT:    [[V2:%.*]] = sext i16 [[B0]] to i32
-; CHECK-NEXT:    [[V3:%.*]] = sext i16 [[C0]] to i32
-; CHECK-NEXT:    [[V4:%.*]] = sext i16 [[D0]] to i32
-; CHECK-NEXT:    [[V5:%.*]] = sext i16 [[E0]] to i32
-; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[V1]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[V3]], i32 1
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[V5]], i32 2
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[V1]], i32 3
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[V2]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[V1]], i32 1
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[V1]], i32 2
-; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[V4]], i32 3
-; CHECK-NEXT:    [[TMP8:%.*]] = add <4 x i32> [[TMP3]], [[TMP7]]
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x i16> undef, i16 [[B0]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i16> [[TMP0]], i16 [[C0]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[E0]], i32 2
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i16> [[TMP2]], i16 [[D0]], i32 3
+; CHECK-NEXT:    [[TMP4:%.*]] = sext <4 x i16> [[TMP3]] to <4 x i32>
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> undef, i32 [[V1]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[V1]], i32 1
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[V1]], i32 2
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <4 x i32> [[TMP7]], i32 [[V1]], i32 3
+; CHECK-NEXT:    [[TMP9:%.*]] = add <4 x i32> [[TMP8]], [[TMP4]]
 ; CHECK-NEXT:    [[IDXS0:%.*]] = getelementptr inbounds i32, i32* [[S:%.*]], i64 0
 ; CHECK-NEXT:    [[IDXS1:%.*]] = getelementptr inbounds i32, i32* [[S]], i64 1
 ; CHECK-NEXT:    [[IDXS2:%.*]] = getelementptr inbounds i32, i32* [[S]], i64 2
 ; CHECK-NEXT:    [[IDXS3:%.*]] = getelementptr inbounds i32, i32* [[S]], i64 3
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[IDXS0]] to <4 x i32>*
-; CHECK-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* [[TMP9]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[IDXS0]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* [[TMP10]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry: