Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2389,10 +2389,28 @@
   case Intrinsic::x86_avx512_mask_cmp_pd_512:
   case Intrinsic::x86_avx512_mask_cmp_ps_128:
   case Intrinsic::x86_avx512_mask_cmp_ps_256:
-  case Intrinsic::x86_avx512_mask_cmp_ps_512:
+  case Intrinsic::x86_avx512_mask_cmp_ps_512: {
     if(X86CreateCanonicalCMP(II))
       return II;
+    // Fold cmp(sub(a,b),0) into cmp(a,b).
+    if (Instruction *I = dyn_cast<Instruction>(II->getArgOperand(0))) {
+      if (I->getOpcode() == Instruction::FSub && I->hasOneUse()) {
+        // This fold is not valid under strict FP semantics, but it does not
+        // require full fast-math either: the no-NaNs, no-infs, and
+        // no-signed-zeros flags are sufficient.
+        FastMathFlags FMFs = I->getFastMathFlags();
+        if (FMFs.noNaNs() && FMFs.noInfs() && FMFs.noSignedZeros() &&
+            isa<ConstantAggregateZero>(II->getArgOperand(1))) {
+          Value *LHS = I->getOperand(0);
+          Value *RHS = I->getOperand(1);
+          II->setArgOperand(0, LHS);
+          II->setArgOperand(1, RHS);
+          return II;
+        }
+      }
+    }
     break;
+  }
   case Intrinsic::x86_avx512_mask_add_ps_512:
   case Intrinsic::x86_avx512_mask_div_ps_512:
Index: test/Transforms/InstCombine/X86FsubCmppd256Combine.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/X86FsubCmppd256Combine.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; The test checks the folding of cmp(sub(a,b),0) into cmp(a,b).
+
+define zeroext i8 @functionTest(<4 x double> %a, <4 x double> %aa) local_unnamed_addr {
+; CHECK-LABEL: @functionTest(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB_I1:%.*]] = fsub nnan <4 x double> [[AA:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> [[A]], <4 x double> [[AA]], i32 5, i8 -1)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> [[SUB_I1]], <4 x double> zeroinitializer, i32 5, i8 -1)
+; CHECK-NEXT: [[AND28:%.*]] = and i8 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: ret i8 [[AND28]]
+;
+entry:
+  %sub.i = fsub fast <4 x double> %a, %aa
+  %sub.i1 = fsub nnan <4 x double> %aa, %a
+  %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> %sub.i, <4 x double> zeroinitializer, i32 5, i8 -1)
+  ; The fold requires all three flags (nnan, ninf, nsz); %sub.i1 has only nnan, so it is not folded.
+  %1 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> %sub.i1, <4 x double> zeroinitializer, i32 5, i8 -1)
+  %and28 = and i8 %0, %1
+  ret i8 %and28
+}
+
+declare i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double>, <4 x double>, i32, i8)
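A note on the flag check in the InstCombineCalls.cpp hunk above: without no-infs the fold is unsound, because the subtraction can turn two equal infinite inputs into a NaN. A minimal standalone C++ sketch of that counterexample, using plain scalar doubles rather than the AVX-512 intrinsics themselves:

    #include <cstdio>
    #include <limits>

    int main() {
      double inf = std::numeric_limits<double>::infinity();
      // Folded form cmp(a, b): inf == inf is true.
      bool folded = (inf == inf);
      // Unfolded form cmp(sub(a, b), 0): inf - inf is NaN, and NaN == 0.0 is false.
      bool unfolded = ((inf - inf) == 0.0);
      std::printf("folded=%d unfolded=%d\n", folded, unfolded); // prints folded=1 unfolded=0
      return 0;
    }

This is why the patch requires the nnan, ninf, and nsz flags on the fsub rather than demanding the full fast flag.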
Index: test/Transforms/InstCombine/X86FsubCmppd512Combine.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/X86FsubCmppd512Combine.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; The test checks the folding of cmp(sub(a,b),0) into cmp(a,b).
+
+define zeroext i8 @functionTest(<8 x double> %a, <8 x double> %aa) local_unnamed_addr {
+; CHECK-LABEL: @functionTest(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB_I1:%.*]] = fsub nnan <8 x double> [[AA:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> [[A]], <8 x double> [[AA]], i32 5, i8 -1, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> [[SUB_I1]], <8 x double> zeroinitializer, i32 5, i8 -1, i32 4)
+; CHECK-NEXT: [[AND28:%.*]] = and i8 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: ret i8 [[AND28]]
+;
+entry:
+  %sub.i = fsub fast <8 x double> %a, %aa
+  %sub.i1 = fsub nnan <8 x double> %aa, %a
+  %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %sub.i, <8 x double> zeroinitializer, i32 5, i8 -1, i32 4)
+  ; The fold requires all three flags (nnan, ninf, nsz); %sub.i1 has only nnan, so it is not folded.
+  %1 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %sub.i1, <8 x double> zeroinitializer, i32 5, i8 -1, i32 4)
+  %and28 = and i8 %0, %1
+  ret i8 %and28
+}
+
+declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double>, <8 x double>, i32, i8, i32)
Index: test/Transforms/InstCombine/X86FsubCmppdCombine.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/X86FsubCmppdCombine.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; The test checks the folding of cmp(sub(a,b),0) into cmp(a,b).
+
+define zeroext i8 @functionTest(<2 x double> %a, <2 x double> %aa) local_unnamed_addr {
+; CHECK-LABEL: @functionTest(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB1_I:%.*]] = fsub nnan <2 x double> [[AA:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> [[A]], <2 x double> [[AA]], i32 5, i8 -1)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> [[SUB1_I]], <2 x double> zeroinitializer, i32 5, i8 -1)
+; CHECK-NEXT: [[AND28:%.*]] = and i8 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: ret i8 [[AND28]]
+;
+entry:
+  %sub.i = fsub fast <2 x double> %a, %aa
+  %sub1.i = fsub nnan <2 x double> %aa, %a
+  %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %sub.i, <2 x double> zeroinitializer, i32 5, i8 -1)
+  ; The fold requires all three flags (nnan, ninf, nsz); %sub1.i has only nnan, so it is not folded.
+  %1 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %sub1.i, <2 x double> zeroinitializer, i32 5, i8 -1)
+  %and28 = and i8 %0, %1
+  ret i8 %and28
+}
+
+declare i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double>, <2 x double>, i32, i8)
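For context, IR of the shape these tests exercise typically comes from source that subtracts before comparing. A hypothetical C++ snippet that should produce the cmp(sub(a,b),0) pattern for the 256-bit pd case when compiled with AVX-512VL and fast-math enabled (e.g. -O2 -mavx512vl -ffast-math; the exact flags and the IR the compiler emits are assumptions here, not guaranteed):

    #include <immintrin.h>

    // Predicate 5 in the tests above is _CMP_NLT_US (not-less-than, unordered, signaling).
    __mmask8 cmp_diff(__m256d a, __m256d b) {
      __m256d diff = _mm256_sub_pd(a, b);                                // sub(a, b)
      return _mm256_cmp_pd_mask(diff, _mm256_setzero_pd(), _CMP_NLT_US); // cmp(sub, 0)
    }

After the fold, the subtraction becomes dead and the mask is computed directly from a and b, which is what the CHECK lines verify.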
Index: test/Transforms/InstCombine/X86FsubCmpps256Combine.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/X86FsubCmpps256Combine.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; The test checks the folding of cmp(sub(a,b),0) into cmp(a,b).
+
+define zeroext i8 @functionTest(<8 x float> %a, <8 x float> %aa) local_unnamed_addr {
+; CHECK-LABEL: @functionTest(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB_I1:%.*]] = fsub nnan <8 x float> [[AA:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> [[A]], <8 x float> [[AA]], i32 5, i8 -1)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> [[SUB_I1]], <8 x float> zeroinitializer, i32 5, i8 -1)
+; CHECK-NEXT: [[AND28:%.*]] = and i8 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: ret i8 [[AND28]]
+;
+entry:
+  %sub.i = fsub fast <8 x float> %a, %aa
+  %sub.i1 = fsub nnan <8 x float> %aa, %a
+  %0 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> %sub.i, <8 x float> zeroinitializer, i32 5, i8 -1)
+  ; The fold requires all three flags (nnan, ninf, nsz); %sub.i1 has only nnan, so it is not folded.
+  %1 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> %sub.i1, <8 x float> zeroinitializer, i32 5, i8 -1)
+  %and28 = and i8 %0, %1
+  ret i8 %and28
+}
+
+declare i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float>, <8 x float>, i32, i8)
Index: test/Transforms/InstCombine/X86FsubCmpps512Combine.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/X86FsubCmpps512Combine.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; The test checks the folding of cmp(sub(a,b),0) into cmp(a,b).
+
+define zeroext i16 @functionTest(<16 x float> %a, <16 x float> %aa) local_unnamed_addr {
+; CHECK-LABEL: @functionTest(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB_I1:%.*]] = fsub nnan <16 x float> [[AA:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> [[A]], <16 x float> [[AA]], i32 5, i16 -1, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> [[SUB_I1]], <16 x float> zeroinitializer, i32 5, i16 -1, i32 4)
+; CHECK-NEXT: [[AND28:%.*]] = and i16 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: ret i16 [[AND28]]
+;
+entry:
+  %sub.i = fsub fast <16 x float> %a, %aa
+  %sub.i1 = fsub nnan <16 x float> %aa, %a
+  %0 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %sub.i, <16 x float> zeroinitializer, i32 5, i16 -1, i32 4)
+  ; The fold requires all three flags (nnan, ninf, nsz); %sub.i1 has only nnan, so it is not folded.
+  %1 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %sub.i1, <16 x float> zeroinitializer, i32 5, i16 -1, i32 4)
+  %and28 = and i16 %0, %1
+  ret i16 %and28
+}
+
+declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float>, <16 x float>, i32, i16, i32)
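Note one difference across the vector widths: the 512-bit intrinsics (cmp.ps.512 above and cmp.pd.512 earlier) take an extra trailing i32 operand, 4 here, which is the current-rounding-direction / no-SAE encoding (_MM_FROUND_CUR_DIRECTION). The CHECK lines confirm the fold carries it through unchanged, together with the comparison predicate (i32 5) and the all-ones write mask. The 128- and 256-bit variants have no such operand, which is why the same fold can share one switch case for all six intrinsics.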
Index: test/Transforms/InstCombine/X86FsubCmppsCombine.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/X86FsubCmppsCombine.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; The test checks the folding of cmp(sub(a,b),0) into cmp(a,b).
+
+define zeroext i8 @functionTest(<4 x float> %a, <4 x float> %aa) local_unnamed_addr {
+; CHECK-LABEL: @functionTest(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB_I1:%.*]] = fsub nnan <4 x float> [[AA:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> [[A]], <4 x float> [[AA]], i32 5, i8 -1)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> [[SUB_I1]], <4 x float> zeroinitializer, i32 5, i8 -1)
+; CHECK-NEXT: [[AND28:%.*]] = and i8 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: ret i8 [[AND28]]
+;
+entry:
+  %sub.i = fsub fast <4 x float> %a, %aa
+  %sub.i1 = fsub nnan <4 x float> %aa, %a
+  %0 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> %sub.i, <4 x float> zeroinitializer, i32 5, i8 -1)
+  ; The fold requires all three flags (nnan, ninf, nsz); %sub.i1 has only nnan, so it is not folded.
+  %1 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> %sub.i1, <4 x float> zeroinitializer, i32 5, i8 -1)
+  %and28 = and i8 %0, %1
+  ret i8 %and28
+}
+
+declare i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float>, <4 x float>, i32, i8)
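Each test file is self-contained, so a single case can be checked by hand from an LLVM build tree by mirroring its RUN line (paths assumed relative to the source tree):

    opt < test/Transforms/InstCombine/X86FsubCmppsCombine.ll -instcombine -S \
      | FileCheck test/Transforms/InstCombine/X86FsubCmppsCombine.ll

or by running the whole set under llvm-lit.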