Index: llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -5126,9 +5126,12 @@
     /// Checks if the reduction operation can be vectorized.
     bool isVectorizable() const {
       return LHS && RHS &&
-             // We currently only support adds && min/max reductions.
+             // We currently only support add/mul/logical && min/max reductions.
              ((Kind == RK_Arithmetic &&
-               (Opcode == Instruction::Add || Opcode == Instruction::FAdd)) ||
+               (Opcode == Instruction::Add || Opcode == Instruction::FAdd ||
+                Opcode == Instruction::Mul || Opcode == Instruction::FMul ||
+                Opcode == Instruction::And || Opcode == Instruction::Or ||
+                Opcode == Instruction::Xor)) ||
              ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
               (Kind == RK_Min || Kind == RK_Max)) ||
              (Opcode == Instruction::ICmp &&
Index: llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll
===================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll
@@ -217,29 +217,30 @@
 ;
 ; SSE2-LABEL: @test_and(
 ; SSE2-NEXT:  entry:
-; SSE2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[P:%.*]], align 4
-; SSE2-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; SSE2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
-; SSE2-NEXT:    [[MUL_18:%.*]] = and i32 [[TMP1]], [[TMP0]]
+; SSE2-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
 ; SSE2-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
-; SSE2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
-; SSE2-NEXT:    [[MUL_29:%.*]] = and i32 [[TMP2]], [[MUL_18]]
 ; SSE2-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
-; SSE2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
-; SSE2-NEXT:    [[MUL_310:%.*]] = and i32 [[TMP3]], [[MUL_29]]
 ; SSE2-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
-; SSE2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
-; SSE2-NEXT:    [[MUL_411:%.*]] = and i32 [[TMP4]], [[MUL_310]]
 ; SSE2-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
-; SSE2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
-; SSE2-NEXT:    [[MUL_512:%.*]] = and i32 [[TMP5]], [[MUL_411]]
 ; SSE2-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
-; SSE2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
-; SSE2-NEXT:    [[MUL_613:%.*]] = and i32 [[TMP6]], [[MUL_512]]
 ; SSE2-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; SSE2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
-; SSE2-NEXT:    [[MUL_714:%.*]] = and i32 [[TMP7]], [[MUL_613]]
-; SSE2-NEXT:    ret i32 [[MUL_714]]
+; SSE2-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
+; SSE2-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
+; SSE2-NEXT:    [[MUL_18:%.*]] = and i32 undef, undef
+; SSE2-NEXT:    [[MUL_29:%.*]] = and i32 undef, [[MUL_18]]
+; SSE2-NEXT:    [[MUL_310:%.*]] = and i32 undef, [[MUL_29]]
+; SSE2-NEXT:    [[MUL_411:%.*]] = and i32 undef, [[MUL_310]]
+; SSE2-NEXT:    [[MUL_512:%.*]] = and i32 undef, [[MUL_411]]
+; SSE2-NEXT:    [[MUL_613:%.*]] = and i32 undef, [[MUL_512]]
+; SSE2-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE2-NEXT:    [[BIN_RDX:%.*]] = and <8 x i32> [[TMP1]], [[RDX_SHUF]]
+; SSE2-NEXT:    [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE2-NEXT:    [[BIN_RDX2:%.*]] = and <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; SSE2-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE2-NEXT:    [[BIN_RDX4:%.*]] = and <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; SSE2-NEXT:    [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
+; SSE2-NEXT:    [[MUL_714:%.*]] = and i32 undef, [[MUL_613]]
+; SSE2-NEXT:    ret i32 [[TMP2]]
 ;
 entry:
   %0 = load i32, i32* %p, align 4
@@ -303,29 +304,30 @@
 ;
 ; SSE2-LABEL: @test_or(
 ; SSE2-NEXT:  entry:
-; SSE2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[P:%.*]], align 4
-; SSE2-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; SSE2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
-; SSE2-NEXT:    [[MUL_18:%.*]] = or i32 [[TMP1]], [[TMP0]]
+; SSE2-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
 ; SSE2-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
-; SSE2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
-; SSE2-NEXT:    [[MUL_29:%.*]] = or i32 [[TMP2]], [[MUL_18]]
 ; SSE2-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
-; SSE2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
-; SSE2-NEXT:    [[MUL_310:%.*]] = or i32 [[TMP3]], [[MUL_29]]
 ; SSE2-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
-; SSE2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
-; SSE2-NEXT:    [[MUL_411:%.*]] = or i32 [[TMP4]], [[MUL_310]]
 ; SSE2-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
-; SSE2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
-; SSE2-NEXT:    [[MUL_512:%.*]] = or i32 [[TMP5]], [[MUL_411]]
 ; SSE2-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
-; SSE2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
-; SSE2-NEXT:    [[MUL_613:%.*]] = or i32 [[TMP6]], [[MUL_512]]
 ; SSE2-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; SSE2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
-; SSE2-NEXT:    [[MUL_714:%.*]] = or i32 [[TMP7]], [[MUL_613]]
-; SSE2-NEXT:    ret i32 [[MUL_714]]
+; SSE2-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
+; SSE2-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
+; SSE2-NEXT:    [[MUL_18:%.*]] = or i32 undef, undef
+; SSE2-NEXT:    [[MUL_29:%.*]] = or i32 undef, [[MUL_18]]
+; SSE2-NEXT:    [[MUL_310:%.*]] = or i32 undef, [[MUL_29]]
+; SSE2-NEXT:    [[MUL_411:%.*]] = or i32 undef, [[MUL_310]]
+; SSE2-NEXT:    [[MUL_512:%.*]] = or i32 undef, [[MUL_411]]
+; SSE2-NEXT:    [[MUL_613:%.*]] = or i32 undef, [[MUL_512]]
+; SSE2-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE2-NEXT:    [[BIN_RDX:%.*]] = or <8 x i32> [[TMP1]], [[RDX_SHUF]]
+; SSE2-NEXT:    [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE2-NEXT:    [[BIN_RDX2:%.*]] = or <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; SSE2-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE2-NEXT:    [[BIN_RDX4:%.*]] = or <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; SSE2-NEXT:    [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
+; SSE2-NEXT:    [[MUL_714:%.*]] = or i32 undef, [[MUL_613]]
+; SSE2-NEXT:    ret i32 [[TMP2]]
 ;
 entry:
   %0 = load i32, i32* %p, align 4
@@ -389,29 +391,30 @@
 ;
 ; SSE2-LABEL: @test_xor(
 ; SSE2-NEXT:  entry:
-; SSE2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[P:%.*]], align 4
-; SSE2-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
-; SSE2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
-; SSE2-NEXT:    [[MUL_18:%.*]] = xor i32 [[TMP1]], [[TMP0]]
+; SSE2-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
 ; SSE2-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
-; SSE2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
-; SSE2-NEXT:    [[MUL_29:%.*]] = xor i32 [[TMP2]], [[MUL_18]]
 ; SSE2-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
-; SSE2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
-; SSE2-NEXT:    [[MUL_310:%.*]] = xor i32 [[TMP3]], [[MUL_29]]
 ; SSE2-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
-; SSE2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
-; SSE2-NEXT:    [[MUL_411:%.*]] = xor i32 [[TMP4]], [[MUL_310]]
 ; SSE2-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
-; SSE2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
-; SSE2-NEXT:    [[MUL_512:%.*]] = xor i32 [[TMP5]], [[MUL_411]]
 ; SSE2-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
-; SSE2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
-; SSE2-NEXT:    [[MUL_613:%.*]] = xor i32 [[TMP6]], [[MUL_512]]
 ; SSE2-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; SSE2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
-; SSE2-NEXT:    [[MUL_714:%.*]] = xor i32 [[TMP7]], [[MUL_613]]
-; SSE2-NEXT:    ret i32 [[MUL_714]]
+; SSE2-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
+; SSE2-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
+; SSE2-NEXT:    [[MUL_18:%.*]] = xor i32 undef, undef
+; SSE2-NEXT:    [[MUL_29:%.*]] = xor i32 undef, [[MUL_18]]
+; SSE2-NEXT:    [[MUL_310:%.*]] = xor i32 undef, [[MUL_29]]
+; SSE2-NEXT:    [[MUL_411:%.*]] = xor i32 undef, [[MUL_310]]
+; SSE2-NEXT:    [[MUL_512:%.*]] = xor i32 undef, [[MUL_411]]
+; SSE2-NEXT:    [[MUL_613:%.*]] = xor i32 undef, [[MUL_512]]
+; SSE2-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE2-NEXT:    [[BIN_RDX:%.*]] = xor <8 x i32> [[TMP1]], [[RDX_SHUF]]
+; SSE2-NEXT:    [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE2-NEXT:    [[BIN_RDX2:%.*]] = xor <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; SSE2-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE2-NEXT:    [[BIN_RDX4:%.*]] = xor <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; SSE2-NEXT:    [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
+; SSE2-NEXT:    [[MUL_714:%.*]] = xor i32 undef, [[MUL_613]]
+; SSE2-NEXT:    ret i32 [[TMP2]]
 ;
 entry:
   %0 = load i32, i32* %p, align 4
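
For context, the updated SSE2 check lines above all exercise the same manually unrolled reduction over eight i32 values, which the patched isVectorizable() now accepts for and/or/xor (and mul). The following C sketch is illustrative only and is not part of the patch or the test file; the function name reduce_and8 is hypothetical. Once such a function reaches the SLP vectorizer as a scalar chain of loads and ands, the pass can replace it with a single <8 x i32> load followed by the shuffle-based reduction shown in the checks.

/* Illustrative sketch: a manually unrolled bitwise-AND reduction over 8
 * ints, matching the shape of @test_and in reduction_unrolled.ll.
 * (Hypothetical name; not taken from the commit.) */
int reduce_and8(const int *p) {
  int r = p[0];   /* corresponds to the scalar %0 = load i32 */
  r &= p[1];      /* mul.18 in the test's IR naming */
  r &= p[2];
  r &= p[3];
  r &= p[4];
  r &= p[5];
  r &= p[6];
  r &= p[7];      /* mul.714: the final reduced value that is returned */
  return r;
}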