Index: lib/Transforms/InstCombine/InstCombineInternal.h
===================================================================
--- lib/Transforms/InstCombine/InstCombineInternal.h
+++ lib/Transforms/InstCombine/InstCombineInternal.h
@@ -47,14 +47,14 @@
 /// 1 -> Constants
 /// 2 -> Other non-instructions
 /// 3 -> Arguments
-/// 3 -> Unary operations
-/// 4 -> Other instructions
+/// 4 -> Unary operations
+/// 5 -> Other instructions
 static inline unsigned getComplexity(Value *V) {
   if (isa<Instruction>(V)) {
-    if (BinaryOperator::isNeg(V) || BinaryOperator::isFNeg(V) ||
-        BinaryOperator::isNot(V))
-      return 3;
-    return 4;
+    if (isa<CastInst>(V) || BinaryOperator::isNeg(V) ||
+        BinaryOperator::isFNeg(V) || BinaryOperator::isNot(V))
+      return 4;
+    return 5;
   }
   if (isa<Argument>(V))
     return 3;
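
For context: getComplexity() is the rank InstCombine uses when canonicalizing commutative operations and compares; the operand with the higher rank is moved to the left-hand side, and a compare swaps its predicate along with its operands so the result is unchanged. Giving casts the same rank as neg/fneg/not therefore only changes which operand ends up first, which is all the test updates below reflect (operands trade places, and icmp/fcmp predicates flip accordingly). A minimal sketch of the swap rule follows, assuming the surrounding InstCombine headers; the in-tree logic lives in places such as SimplifyAssociativeOrCommutative() and the icmp/fcmp visitors, not in this helper.

// Sketch only -- not the in-tree code.
#include "llvm/IR/InstrTypes.h" // BinaryOperator

using namespace llvm;

// Put the operand with the higher complexity rank (see getComplexity above)
// on the left of a commutative operation. Compares do the same and also
// swap the predicate (sgt <-> slt, ogt <-> olt, ...), so behaviour is
// preserved; only the textual operand order changes.
static bool canonicalizeOperandOrder(BinaryOperator &I) {
  if (!I.isCommutative())
    return false;
  if (getComplexity(I.getOperand(0)) >= getComplexity(I.getOperand(1)))
    return false;   // Already in canonical order.
  I.swapOperands(); // Move the more complex value to the left.
  return true;
}
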
Index: test/Transforms/BBVectorize/X86/loop1.ll
===================================================================
--- test/Transforms/BBVectorize/X86/loop1.ll
+++ test/Transforms/BBVectorize/X86/loop1.ll
@@ -48,7 +48,7 @@
 ; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
 ; CHECK-UNRL: %add4 = fadd <2 x double> %3, %3
 ; CHECK-UNRL: %add5 = fadd <2 x double> %add4, %2
-; CHECK-UNRL: %mul6 = fmul <2 x double> %2, %add5
+; CHECK-UNRL: %mul6 = fmul <2 x double> %add5, %2
 ; CHECK-UNRL: %add7 = fadd <2 x double> %add, %mul6
 ; CHECK-UNRL: %mul8 = fmul <2 x double> %3, %3
 ; CHECK-UNRL: %add9 = fadd <2 x double> %2, %2
Index: test/Transforms/BBVectorize/loop1.ll
===================================================================
--- test/Transforms/BBVectorize/loop1.ll
+++ test/Transforms/BBVectorize/loop1.ll
@@ -74,7 +74,7 @@
 ; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
 ; CHECK-UNRL: %add4 = fadd <2 x double> %3, %3
 ; CHECK-UNRL: %add5 = fadd <2 x double> %add4, %2
-; CHECK-UNRL: %mul6 = fmul <2 x double> %2, %add5
+; CHECK-UNRL: %mul6 = fmul <2 x double> %add5, %2
 ; CHECK-UNRL: %add7 = fadd <2 x double> %add, %mul6
 ; CHECK-UNRL: %mul8 = fmul <2 x double> %3, %3
 ; CHECK-UNRL: %add9 = fadd <2 x double> %2, %2
Index: test/Transforms/InstCombine/add.ll
===================================================================
--- test/Transforms/InstCombine/add.ll
+++ test/Transforms/InstCombine/add.ll
@@ -100,7 +100,7 @@
 define i1 @test10(i8 %A, i8 %b) {
 ; CHECK-LABEL: @test10(
 ; CHECK-NEXT: [[B:%.*]] = sub i8 0, %b
-; CHECK-NEXT: [[C:%.*]] = icmp ne i8 %A, [[B]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[B]], %A
 ; CHECK-NEXT: ret i1 [[C]]
 ;
 %B = add i8 %A, %b
@@ -112,7 +112,7 @@
 define <2 x i1> @test10vec(<2 x i8> %a, <2 x i8> %b) {
 ; CHECK-LABEL: @test10vec(
 ; CHECK-NEXT: [[C:%.*]] = sub <2 x i8> zeroinitializer, %b
-; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> %a, [[C]]
+; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> [[C]], %a
 ; CHECK-NEXT: ret <2 x i1> [[D]]
 ;
 %c = add <2 x i8> %a, %b
Index: test/Transforms/InstCombine/and.ll
===================================================================
--- test/Transforms/InstCombine/and.ll
+++ test/Transforms/InstCombine/and.ll
@@ -176,7 +176,7 @@
 define i8 @test17(i8 %X, i8 %Y) {
 ; CHECK-LABEL: @test17(
 ; CHECK-NEXT: [[Y_NOT:%.*]] = xor i8 %Y, -1
-; CHECK-NEXT: [[D:%.*]] = or i8 %X, [[Y_NOT]]
+; CHECK-NEXT: [[D:%.*]] = or i8 [[Y_NOT]], %X
 ; CHECK-NEXT: ret i8 [[D]]
 ;
 %B = xor i8 %X, -1
Index: test/Transforms/InstCombine/apint-sub.ll
===================================================================
--- test/Transforms/InstCombine/apint-sub.ll
+++ test/Transforms/InstCombine/apint-sub.ll
@@ -50,7 +50,7 @@
 define i57 @test6(i57 %A, i57 %B) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i57 %B, -1
-; CHECK-NEXT: [[D:%.*]] = and i57 %A, [[B_NOT]]
+; CHECK-NEXT: [[D:%.*]] = and i57 [[B_NOT]], %A
 ; CHECK-NEXT: ret i57 [[D]]
 ;
 %C = and i57 %A, %B
Index: test/Transforms/InstCombine/icmp.ll
===================================================================
--- test/Transforms/InstCombine/icmp.ll
+++ test/Transforms/InstCombine/icmp.ll
@@ -918,7 +918,7 @@
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 %i to i16
 ; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 %j to i16
 ; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 [[TMP1]], 2
-; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[TMP2]], [[GEP1_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
 ; CHECK-NEXT: ret i1 [[TMP3]]
 ;
 %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
@@ -949,7 +949,7 @@
 ; CHECK-LABEL: @test60_addrspacecast_smaller(
 ; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 %i, 2
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 %j to i16
-; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i16 [[TMP1]], [[GEP1_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[TMP2]]
 ;
 %bit = addrspacecast i8* %foo to i32 addrspace(1)*
@@ -981,7 +981,7 @@
 ; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, i32* [[BIT]], i64 %i
 ; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, i8* %foo, i64 %j
 ; CHECK-NEXT: [[CAST1:%.*]] = bitcast i32* [[GEP1]] to i8*
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8* [[CAST1]], [[GEP2]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8* [[GEP2]], [[CAST1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %bit = bitcast i8* %foo to i32*
@@ -999,7 +999,7 @@
 ; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, i32 addrspace(1)* [[BIT]], i16 %i
 ; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, i8 addrspace(1)* %foo, i16 %j
 ; CHECK-NEXT: [[CAST1:%.*]] = bitcast i32 addrspace(1)* [[GEP1]] to i8 addrspace(1)*
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 addrspace(1)* [[CAST1]], [[GEP2]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 addrspace(1)* [[GEP2]], [[CAST1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
Index: test/Transforms/InstCombine/or.ll
===================================================================
--- test/Transforms/InstCombine/or.ll
+++ test/Transforms/InstCombine/or.ll
@@ -490,7 +490,7 @@
 ; CHECK-LABEL: @orsext_to_sel_multi_use(
 ; CHECK-NEXT: [[SEXT:%.*]] = sext i1 %y to i32
 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SEXT]], %x
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SEXT]], [[OR]]
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[OR]], [[SEXT]]
 ; CHECK-NEXT: ret i32 [[ADD]]
 ;
 %sext = sext i1 %y to i32
Index: test/Transforms/InstCombine/select.ll
===================================================================
--- test/Transforms/InstCombine/select.ll
+++ test/Transforms/InstCombine/select.ll
@@ -190,7 +190,7 @@
 define i1 @test63(i1 %A, i1 %B) {
 ; CHECK-LABEL: @test63(
 ; CHECK-NEXT: [[NOT:%.*]] = xor i1 %A, true
-; CHECK-NEXT: [[C:%.*]] = or i1 %B, [[NOT]]
+; CHECK-NEXT: [[C:%.*]] = or i1 [[NOT]], %B
 ; CHECK-NEXT: ret i1 [[C]]
 ;
 %not = xor i1 %A, true
@@ -201,7 +201,7 @@
 define <2 x i1> @test63vec(<2 x i1> %A, <2 x i1> %B) {
 ; CHECK-LABEL: @test63vec(
 ; CHECK-NEXT: [[NOT:%.*]] = xor <2 x i1> %A, <i1 true, i1 true>
-; CHECK-NEXT: [[C:%.*]] = or <2 x i1> %B, [[NOT]]
+; CHECK-NEXT: [[C:%.*]] = or <2 x i1> [[NOT]], %B
 ; CHECK-NEXT: ret <2 x i1> [[C]]
 ;
 %not = xor <2 x i1> %A, <i1 true, i1 true>
Index: test/Transforms/InstCombine/sub.ll
===================================================================
--- test/Transforms/InstCombine/sub.ll
+++ test/Transforms/InstCombine/sub.ll
@@ -54,7 +54,7 @@
 define i32 @test6(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 %B, -1
-; CHECK-NEXT: [[D:%.*]] = and i32 %A, [[B_NOT]]
+; CHECK-NEXT: [[D:%.*]] = and i32 [[B_NOT]], %A
 ; CHECK-NEXT: ret i32 [[D]]
 ;
 %C = and i32 %A, %B
@@ -617,7 +617,7 @@
 define i32 @test46(i32 %x, i32 %y) {
 ; CHECK-LABEL: @test46(
 ; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 %x, -1
-; CHECK-NEXT: [[SUB:%.*]] = and i32 %y, [[X_NOT]]
+; CHECK-NEXT: [[SUB:%.*]] = and i32 [[X_NOT]], %y
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
 %or = or i32 %x, %y
Index: test/Transforms/InstCombine/vec_demanded_elts.ll
===================================================================
--- test/Transforms/InstCombine/vec_demanded_elts.ll
+++ test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -67,7 +67,7 @@
 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]]
 ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]]
 ; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]]
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]]
 ; CHECK-NEXT: ret i64 [[TMP15]]
 ;
 %v00 = insertelement <4 x float> undef, float %f, i32 0
Index: test/Transforms/InstCombine/vec_sext.ll
===================================================================
--- test/Transforms/InstCombine/vec_sext.ll
+++ test/Transforms/InstCombine/vec_sext.ll
@@ -6,7 +6,7 @@
 ; CHECK-NEXT: [[SUB:%.*]] = sub nsw <4 x i32> zeroinitializer, %a
 ; CHECK-NEXT: [[B_LOBIT:%.*]] = ashr <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
 ; CHECK-NEXT: [[T1:%.*]] = xor <4 x i32> [[B_LOBIT]], <i32 -1, i32 -1, i32 -1, i32 -1>
-; CHECK-NEXT: [[T2:%.*]] = and <4 x i32> %a, [[T1]]
+; CHECK-NEXT: [[T2:%.*]] = and <4 x i32> [[T1]], %a
 ; CHECK-NEXT: [[T3:%.*]] = and <4 x i32> [[B_LOBIT]], [[SUB]]
 ; CHECK-NEXT: [[COND:%.*]] = or <4 x i32> [[T2]], [[T3]]
 ; CHECK-NEXT: ret <4 x i32> [[COND]]
Index: test/Transforms/InstCombine/x86-avx512.ll
===================================================================
--- test/Transforms/InstCombine/x86-avx512.ll
+++ test/Transforms/InstCombine/x86-avx512.ll
@@ -525,7 +525,7 @@
 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]]
 ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]]
 ; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]]
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]]
 ; CHECK-NEXT: ret i64 [[TMP15]]
 ;
 %v00 = insertelement <4 x float> undef, float %f, i32 0
@@ -605,7 +605,7 @@
 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]]
 ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]]
 ; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]]
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]]
 ; CHECK-NEXT: ret i64 [[TMP15]]
 ;
 %v00 = insertelement <4 x float> undef, float %f, i32 0
Index: test/Transforms/InstCombine/xor.ll
===================================================================
--- test/Transforms/InstCombine/xor.ll
+++ test/Transforms/InstCombine/xor.ll
@@ -321,7 +321,7 @@
 
 define i32 @test26(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test26(
-; CHECK-NEXT: [[T4:%.*]] = and i32 %a, %b
+; CHECK-NEXT: [[T4:%.*]] = and i32 %b, %a
 ; CHECK-NEXT: ret i32 [[T4]]
 ;
 %b2 = xor i32 %b, -1
Index: test/Transforms/InstCombine/xor2.ll
===================================================================
--- test/Transforms/InstCombine/xor2.ll
+++ test/Transforms/InstCombine/xor2.ll
@@ -110,7 +110,7 @@
 define i32 @test7(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 %b, -1
-; CHECK-NEXT: [[XOR:%.*]] = or i32 %a, [[B_NOT]]
+; CHECK-NEXT: [[XOR:%.*]] = or i32 [[B_NOT]], %a
 ; CHECK-NEXT: ret i32 [[XOR]]
 ;
 %or = or i32 %a, %b
@@ -123,7 +123,7 @@
 define i32 @test8(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test8(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 %b, -1
-; CHECK-NEXT: [[XOR:%.*]] = or i32 %a, [[B_NOT]]
+; CHECK-NEXT: [[XOR:%.*]] = or i32 [[B_NOT]], %a
 ; CHECK-NEXT: ret i32 [[XOR]]
 ;
 %neg = xor i32 %a, -1
Index: test/Transforms/LoopVectorize/minmax_reduction.ll
===================================================================
--- test/Transforms/LoopVectorize/minmax_reduction.ll
+++ test/Transforms/LoopVectorize/minmax_reduction.ll
@@ -13,7 +13,7 @@
 ; CHECK-LABEL: @max_red(
 ; CHECK: %[[VAR:.*]] = insertelement <2 x i32> undef, i32 %max, i32 0
 ; CHECK: {{.*}} = shufflevector <2 x i32> %[[VAR]], <2 x i32> undef, <2 x i32> zeroinitializer
-; CHECK: icmp sgt <2 x i32>
+; CHECK: icmp slt <2 x i32>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: icmp sgt <2 x i32>
@@ -70,7 +70,7 @@
 
 ; Turn this into a min reduction.
 ; CHECK-LABEL: @min_red(
-; CHECK: icmp slt <2 x i32>
+; CHECK: icmp sgt <2 x i32>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: icmp slt <2 x i32>
@@ -129,7 +129,7 @@
 
 ; Turn this into a max reduction.
 ; CHECK-LABEL: @umax_red(
-; CHECK: icmp ugt <2 x i32>
+; CHECK: icmp ult <2 x i32>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: icmp ugt <2 x i32>
@@ -186,7 +186,7 @@
 
 ; Turn this into a min reduction.
 ; CHECK-LABEL: @umin_red(
-; CHECK: icmp ult <2 x i32>
+; CHECK: icmp ugt <2 x i32>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: icmp ult <2 x i32>
@@ -244,7 +244,7 @@
 ; SGE -> SLT
 ; Turn this into a min reduction (select inputs are reversed).
 ; CHECK-LABEL: @sge_min_red(
-; CHECK: icmp sge <2 x i32>
+; CHECK: icmp sle <2 x i32>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: icmp slt <2 x i32>
@@ -273,7 +273,7 @@
 ; SLE -> SGT
 ; Turn this into a max reduction (select inputs are reversed).
 ; CHECK-LABEL: @sle_min_red(
-; CHECK: icmp sle <2 x i32>
+; CHECK: icmp sge <2 x i32>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: icmp sgt <2 x i32>
@@ -302,7 +302,7 @@
 ; UGE -> ULT
 ; Turn this into a min reduction (select inputs are reversed).
 ; CHECK-LABEL: @uge_min_red(
-; CHECK: icmp uge <2 x i32>
+; CHECK: icmp ule <2 x i32>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: icmp ult <2 x i32>
@@ -331,7 +331,7 @@
 ; ULE -> UGT
 ; Turn this into a max reduction (select inputs are reversed).
 ; CHECK-LABEL: @ule_min_red(
-; CHECK: icmp ule <2 x i32>
+; CHECK: icmp uge <2 x i32>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: icmp ugt <2 x i32>
@@ -412,7 +412,7 @@
 
 ; Turn this into a max reduction in the presence of a no-nans-fp-math attribute.
 ; CHECK-LABEL: @max_red_float(
-; CHECK: fcmp fast ogt <2 x float>
+; CHECK: fcmp fast olt <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast ogt <2 x float>
@@ -438,7 +438,7 @@
 }
 
 ; CHECK-LABEL: @max_red_float_ge(
-; CHECK: fcmp fast oge <2 x float>
+; CHECK: fcmp fast ole <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast ogt <2 x float>
@@ -464,7 +464,7 @@
 }
 
 ; CHECK-LABEL: @inverted_max_red_float(
-; CHECK: fcmp fast olt <2 x float>
+; CHECK: fcmp fast ogt <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast ogt <2 x float>
@@ -490,7 +490,7 @@
 }
 
 ; CHECK-LABEL: @inverted_max_red_float_le(
-; CHECK: fcmp fast ole <2 x float>
+; CHECK: fcmp fast oge <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast ogt <2 x float>
@@ -516,7 +516,7 @@
 }
 
 ; CHECK-LABEL: @unordered_max_red_float(
-; CHECK: fcmp fast ole <2 x float>
+; CHECK: fcmp fast oge <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast ogt <2 x float>
@@ -542,7 +542,7 @@
 }
 
 ; CHECK-LABEL: @unordered_max_red_float_ge(
-; CHECK: fcmp fast olt <2 x float>
+; CHECK: fcmp fast ogt <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast ogt <2 x float>
@@ -568,7 +568,7 @@
 }
 
 ; CHECK-LABEL: @inverted_unordered_max_red_float(
-; CHECK: fcmp fast oge <2 x float>
+; CHECK: fcmp fast ole <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast ogt <2 x float>
@@ -594,7 +594,7 @@
 }
 
 ; CHECK-LABEL: @inverted_unordered_max_red_float_le(
-; CHECK: fcmp fast ogt <2 x float>
+; CHECK: fcmp fast olt <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast ogt <2 x float>
@@ -623,7 +623,7 @@
 
 ; Turn this into a min reduction in the presence of a no-nans-fp-math attribute.
 ; CHECK-LABEL: @min_red_float(
-; CHECK: fcmp fast olt <2 x float>
+; CHECK: fcmp fast ogt <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast olt <2 x float>
@@ -649,7 +649,7 @@
 }
 
 ; CHECK-LABEL: @min_red_float_le(
-; CHECK: fcmp fast ole <2 x float>
+; CHECK: fcmp fast oge <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast olt <2 x float>
@@ -675,7 +675,7 @@
 }
 
 ; CHECK-LABEL: @inverted_min_red_float(
-; CHECK: fcmp fast ogt <2 x float>
+; CHECK: fcmp fast olt <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast olt <2 x float>
@@ -701,7 +701,7 @@
 }
 
 ; CHECK-LABEL: @inverted_min_red_float_ge(
-; CHECK: fcmp fast oge <2 x float>
+; CHECK: fcmp fast ole <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast olt <2 x float>
@@ -727,7 +727,7 @@
 }
 
 ; CHECK-LABEL: @unordered_min_red_float(
-; CHECK: fcmp fast oge <2 x float>
+; CHECK: fcmp fast ole <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast olt <2 x float>
@@ -753,7 +753,7 @@
 }
 
 ; CHECK-LABEL: @unordered_min_red_float_le(
-; CHECK: fcmp fast ogt <2 x float>
+; CHECK: fcmp fast olt <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast olt <2 x float>
@@ -779,7 +779,7 @@
 }
 
 ; CHECK-LABEL: @inverted_unordered_min_red_float(
-; CHECK: fcmp fast ole <2 x float>
+; CHECK: fcmp fast oge <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast olt <2 x float>
@@ -805,7 +805,7 @@
 }
 
 ; CHECK-LABEL: @inverted_unordered_min_red_float_ge(
-; CHECK: fcmp fast olt <2 x float>
+; CHECK: fcmp fast ogt <2 x float>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast olt <2 x float>
@@ -832,7 +832,7 @@
 
 ; Make sure we handle doubles, too.
 ; CHECK-LABEL: @min_red_double(
-; CHECK: fcmp fast olt <2 x double>
+; CHECK: fcmp fast ogt <2 x double>
 ; CHECK: select <2 x i1>
 ; CHECK: middle.block
 ; CHECK: fcmp fast olt <2 x double>
Index: test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
===================================================================
--- test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
+++ test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
@@ -13,7 +13,7 @@
 ; CHECK-NEXT: %idxprom = zext i32 %j.113 to i64
 ; CHECK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %var1, i64 %idxprom
 ; CHECK-NEXT: store i32 %add, i32* %arrayidx, align 4, !alias.scope !2, !noalias !2
-; CHECK-NEXT: %add8 = add nsw i32 %[[induction]], %add
+; CHECK-NEXT: %add8 = add nsw i32 %add, %[[induction]]
 ; CHECK-NEXT: %inc = add nuw i32 %j.113, 1
 ; CHECK-NEXT: %cmp2 = icmp ult i32 %inc, %itr
 ; CHECK-NEXT: br i1 %cmp2, label %for.body3, label %for.inc11.loopexit.loopexit6, !llvm.loop !5
Index: test/Transforms/SLPVectorizer/X86/operandorder.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/operandorder.ll
+++ test/Transforms/SLPVectorizer/X86/operandorder.ll
@@ -184,7 +184,7 @@
 ; CHECK: %[[V1:[0-9]+]] = load <4 x float>, <4 x float>*
 ; CHECK: %[[V2:[0-9]+]] = insertelement <4 x float> undef, float %1, i32 0
 ; CHECK: %[[V3:[0-9]+]] = shufflevector <4 x float> %[[V2]], <4 x float> %[[V1]], <4 x i32>
-; CHECK: = fmul <4 x float> %[[V1]], %[[V3]]
+; CHECK: = fmul <4 x float> %[[V3]], %[[V1]]
 
 @a = common global [32000 x float] zeroinitializer, align 16