Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2389,10 +2389,27 @@
   case Intrinsic::x86_avx512_mask_cmp_pd_512:
   case Intrinsic::x86_avx512_mask_cmp_ps_128:
   case Intrinsic::x86_avx512_mask_cmp_ps_256:
-  case Intrinsic::x86_avx512_mask_cmp_ps_512:
+  case Intrinsic::x86_avx512_mask_cmp_ps_512: {
     if(X86CreateCanonicalCMP(II))
       return II;
+    // Folding cmp(sub(a,b),0) into cmp(a,b).
+    if (Instruction *I = dyn_cast<Instruction>(II->getArgOperand(0))) {
+      if (I->getOpcode() == Instruction::FSub && I->hasOneUse()) {
+        // This fold is not valid under strict FP semantics, but it does not
+        // require all of the fast-math flags; only the ninf flag is needed,
+        // because inf - inf is nan.
+        FastMathFlags FMFs = I->getFastMathFlags();
+        if (FMFs.noInfs() && isa<ConstantAggregateZero>(II->getArgOperand(1))) {
+          Value *LHS = I->getOperand(0);
+          Value *RHS = I->getOperand(1);
+          II->setArgOperand(0, LHS);
+          II->setArgOperand(1, RHS);
+          return II;
+        }
+      }
+    }
     break;
+  }
   case Intrinsic::x86_avx512_mask_add_ps_512:
   case Intrinsic::x86_avx512_mask_div_ps_512:
Index: test/Transforms/InstCombine/X86FsubCmpCombine.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/X86FsubCmpCombine.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; The test checks the folding of cmp(sub(a,b),0) into cmp(a,b).
+
+define zeroext i16 @fucntionTets(<2 x double> %a, <4 x double> %b, <8 x double> %c, <4 x float> %d, <8 x float> %e, <16 x float> %f, <2 x double> %aa, <4 x double> %bb, <8 x double> %cc, <4 x float> %dd, <8 x float> %ee, <16 x float> %ff) local_unnamed_addr #0 {
+; CHECK-LABEL: @fucntionTets(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[SUB_SAFE:%.*]] = fsub <2 x double> [[A:%.*]], [[AA:%.*]]
+; CHECK-NEXT:    [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> [[SUB_SAFE]], <2 x double> zeroinitializer, i32 5, i8 -1)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> [[A]], <2 x double> [[AA]], i32 5, i8 -1)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> [[B:%.*]], <4 x double> [[BB:%.*]], i32 5, i8 -1)
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> [[C:%.*]], <8 x double> [[CC:%.*]], i32 5, i8 -1, i32 4)
+; CHECK-NEXT:    [[TMP4:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> [[D:%.*]], <4 x float> [[DD:%.*]], i32 5, i8 -1)
+; CHECK-NEXT:    [[TMP5:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> [[E:%.*]], <8 x float> [[EE:%.*]], i32 5, i8 -1)
+; CHECK-NEXT:    [[TMP6:%.*]] = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> [[F:%.*]], <16 x float> [[FF:%.*]], i32 5, i16 -1, i32 4)
+; CHECK-NEXT:    [[AND38:%.*]] = and i8 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[AND2039:%.*]] = and i8 [[AND38]], [[TMP2]]
+; CHECK-NEXT:    [[AND2240:%.*]] = and i8 [[AND2039]], [[TMP3]]
+; CHECK-NEXT:    [[AND2441:%.*]] = and i8 [[AND2240]], [[TMP4]]
+; CHECK-NEXT:    [[AND2442:%.*]] = and i8 [[AND2441]], [[TMP5]]
+; CHECK-NEXT:    [[CONV27:%.*]] = zext i8 [[AND2442]] to i16
+; CHECK-NEXT:    [[AND28:%.*]] = and i16 [[TMP6]], [[CONV27]]
+; CHECK-NEXT:    ret i16 [[AND28]]
+;
+entry:
+  %sub.safe = fsub <2 x double> %a, %aa
+  %sub.i = fsub ninf <2 x double> %a, %aa
+  %sub.i46 = fsub ninf <4 x double> %b, %bb
+  %sub.i45 = fsub ninf <8 x double> %c, %cc
+  %sub.i44 = fsub ninf <4 x float> %d, %dd
+  %sub.i43 = fsub ninf <8 x float> %e, %ee
+  %sub.i42 = fsub ninf <16 x float> %f, %ff
+  ; Safe math: this fsub has no ninf flag, so the compare below must not be folded.
+  %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %sub.safe, <2 x double> zeroinitializer, i32 5, i8 -1)
+
+  %1 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %sub.i, <2 x double> zeroinitializer, i32 5, i8 -1)
+  %2 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> %sub.i46, <4 x double> zeroinitializer, i32 5, i8 -1)
+  %3 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %sub.i45, <8 x double> zeroinitializer, i32 5, i8 -1, i32 4)
+  %4 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> %sub.i44, <4 x float> zeroinitializer, i32 5, i8 -1)
+  %5 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> %sub.i43, <8 x float> zeroinitializer, i32 5, i8 -1)
+  %6 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %sub.i42, <16 x float> zeroinitializer, i32 5, i16 -1, i32 4)
+  %and38 = and i8 %0, %1
+  %and2039 = and i8 %and38, %2
+  %and2240 = and i8 %and2039, %3
+  %and2441 = and i8 %and2240, %4
+  %and2442 = and i8 %and2441, %5
+  %conv27 = zext i8 %and2442 to i16
+  %and28 = and i16 %6, %conv27
+  ret i16 %and28
+}
+
+declare i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double>, <2 x double>, i32, i8)
+declare i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double>, <4 x double>, i32, i8)
+declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double>, <8 x double>, i32, i8, i32)
+declare i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float>, <4 x float>, i32, i8)
+declare i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float>, <8 x float>, i32, i8)
+declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float>, <16 x float>, i32, i16, i32)