diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -4592,6 +4592,9 @@
 static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
+  if (Constant *C = simplifyFPOp({Op0, Op1}))
+    return C;
+
   // fmul X, 1.0 ==> X
   if (match(Op1, m_FPOne()))
     return Op0;
@@ -4626,9 +4629,6 @@
   if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
     return C;
 
-  if (Constant *C = simplifyFPOp({Op0, Op1}))
-    return C;
-
   // Now apply simplifications that do not require rounding.
   return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse);
 }
diff --git a/llvm/test/Transforms/InstCombine/fma.ll b/llvm/test/Transforms/InstCombine/fma.ll
--- a/llvm/test/Transforms/InstCombine/fma.ll
+++ b/llvm/test/Transforms/InstCombine/fma.ll
@@ -502,8 +502,7 @@
 define <2 x double> @fma_nan_and_const_0(<2 x double> %b) {
 ; CHECK-LABEL: @fma_nan_and_const_0(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RES:%.*]] = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> , <2 x double> , <2 x double> [[B:%.*]])
-; CHECK-NEXT:    ret <2 x double> [[RES]]
+; CHECK-NEXT:    ret <2 x double> 
 ;
 entry:
   %res = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> , <2 x double> , <2 x double> %b)
@@ -513,8 +512,7 @@
 define <2 x double> @fma_nan_and_const_1(<2 x double> %b) {
 ; CHECK-LABEL: @fma_nan_and_const_1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RES:%.*]] = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> , <2 x double> , <2 x double> [[B:%.*]])
-; CHECK-NEXT:    ret <2 x double> [[RES]]
+; CHECK-NEXT:    ret <2 x double> 
 ;
 entry:
   %res = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> , <2 x double> , <2 x double> %b)
@@ -535,8 +533,7 @@
 define <2 x double> @fma_undef_0(<2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: @fma_undef_0(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RES:%.*]] = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> [[B:%.*]], <2 x double> undef, <2 x double> [[C:%.*]])
-; CHECK-NEXT:    ret <2 x double> [[RES]]
+; CHECK-NEXT:    ret <2 x double> 
 ;
 entry:
   %res = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> , <2 x double> %b, <2 x double> %c)
@@ -546,8 +543,7 @@
 define <2 x double> @fma_undef_1(<2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: @fma_undef_1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RES:%.*]] = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> [[B:%.*]], <2 x double> undef, <2 x double> [[C:%.*]])
-; CHECK-NEXT:    ret <2 x double> [[RES]]
+; CHECK-NEXT:    ret <2 x double> 
 ;
 entry:
   %res = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> , <2 x double> %c)
@@ -602,8 +598,7 @@
 define <2 x double> @fma_nan_0(<2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: @fma_nan_0(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RES:%.*]] = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> [[B:%.*]], <2 x double> , <2 x double> [[C:%.*]])
-; CHECK-NEXT:    ret <2 x double> [[RES]]
+; CHECK-NEXT:    ret <2 x double> 
 ;
 entry:
   %res = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> , <2 x double> %b, <2 x double> %c)
@@ -612,8 +607,7 @@
 define <2 x double> @fma_nan_1(<2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: @fma_nan_1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RES:%.*]] = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> [[B:%.*]], <2 x double> , <2 x double> [[C:%.*]])
-; CHECK-NEXT:    ret <2 x double> [[RES]]
+; CHECK-NEXT:    ret <2 x double> 
 ;
 entry:
   %res = call nnan nsz <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> , <2 x double> %c)