diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -836,7 +836,7 @@
     }
   }
 
-  Value *A;
+  Value *A, *B;
   Constant *C;
   if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
     unsigned AWidth = A->getType()->getScalarSizeInBits();
@@ -950,6 +950,18 @@
     }
   }
 
+  // trunc (ctlz_i32(zext(A), B) --> add(ctlz_i16(A, B), C)
+  // The narrow ctlz counts AWidth leading zeros; the wide one counts
+  // SrcWidth of them for the same value, so add the width difference back.
+  // NOTE(review): the m_Intrinsic template argument <Intrinsic::ctlz> was
+  // restored here; without it the pattern match does not compile.
+  if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_ZExt(m_Value(A)),
+                                                       m_Value(B))))) {
+    unsigned AWidth = A->getType()->getScalarSizeInBits();
+    APInt ZextWidth(32, SrcWidth);
+    // Only narrow when the trunc returns to the zext source type and the
+    // narrow type can still represent the largest possible count.
+    if (AWidth == DestWidth && AWidth > ZextWidth.logBase2()) {
+      Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
+      Value *NarrowCtlz =
+          Builder.CreateIntrinsic(Intrinsic::ctlz, {Trunc.getType()}, {A, B});
+      return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
+    }
+  }
   return nullptr;
 }
diff --git a/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll b/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll
--- a/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll
+++ b/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll
@@ -1,7 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
+declare i3 @llvm.ctlz.i3 (i3 , i1)
 declare i32 @llvm.ctlz.i32 (i32, i1)
+declare i34 @llvm.ctlz.i34 (i34, i1)
 declare <2 x i33> @llvm.ctlz.v2i33 (<2 x i33>, i1)
 declare <2 x i32> @llvm.ctlz.v2i32 (<2 x i32>, i1)
 declare <vscale x 2 x i64> @llvm.ctlz.nxv2i64 (<vscale x 2 x i64>, i1)
@@ -11,9 +13,8 @@
 
 define i16 @trunc_ctlz_zext_i16_i32(i16 %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_i16_i32(
-; CHECK-NEXT:    [[Z:%.*]] = zext i16 [[X:%.*]] to i32
-; CHECK-NEXT:    [[P:%.*]] = call i32 @llvm.ctlz.i32(i32 [[Z]], i1 false), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc i32 [[P]] to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0:![0-9]+]]
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw i16 [[TMP1]], 16
 ; CHECK-NEXT:    ret i16 [[ZZ]]
 ;
   %z = zext i16 %x to i32
@@ -26,9 +27,8 @@
 
 define <2 x i8> @trunc_ctlz_zext_v2i8_v2i33(<2 x i8> %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_v2i8_v2i33(
-; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i33>
-; CHECK-NEXT:    [[P:%.*]] = call <2 x i33> @llvm.ctlz.v2i33(<2 x i33> [[Z]], i1 true)
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc <2 x i33> [[P]] to <2 x i8>
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> [[X:%.*]], i1 true)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <2 x i8> [[TMP1]], <i8 25, i8 25>
 ; CHECK-NEXT:    ret <2 x i8> [[ZZ]]
 ;
   %z = zext <2 x i8> %x to <2 x i33>
@@ -41,9 +41,8 @@
 
 define <vscale x 2 x i16> @trunc_ctlz_zext_nxv2i16_nxv2i64(<vscale x 2 x i16> %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_nxv2i16_nxv2i64(
-; CHECK-NEXT:    [[Z:%.*]] = zext <vscale x 2 x i16> [[X:%.*]] to <vscale x 2 x i64>
-; CHECK-NEXT:    [[P:%.*]] = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> [[Z]], i1 false)
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc <vscale x 2 x i64> [[P]] to <vscale x 2 x i16>
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <vscale x 2 x i16> [[TMP1]], shufflevector (<vscale x 2 x i16> insertelement (<vscale x 2 x i16> undef, i16 48, i32 0), <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    ret <vscale x 2 x i16> [[ZZ]]
 ;
   %z = zext <vscale x 2 x i16> %x to <vscale x 2 x i64>
@@ -52,6 +51,8 @@
   ret <vscale x 2 x i16> %zz
 }
 
+; Multiple uses of ctlz for which the opt is disabled
+
 define <2 x i17> @trunc_ctlz_zext_v2i17_v2i32_multiple_uses(<2 x i17> %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_v2i17_v2i32_multiple_uses(
 ; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i17> [[X:%.*]] to <2 x i32>
@@ -67,11 +68,13 @@
   ret <2 x i17> %zz
 }
 
+; Multiple uses of zext for which the opt is disabled
+
 define <vscale x 2 x i16> @trunc_ctlz_zext_nxv2i16_nxv2i63_multiple_uses(<vscale x 2 x i16> %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_nxv2i16_nxv2i63_multiple_uses(
 ; CHECK-NEXT:    [[Z:%.*]] = zext <vscale x 2 x i16> [[X:%.*]] to <vscale x 2 x i63>
-; CHECK-NEXT:    [[P:%.*]] = call <vscale x 2 x i63> @llvm.ctlz.nxv2i63(<vscale x 2 x i63> [[Z]], i1 true)
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc <vscale x 2 x i63> [[P]] to <vscale x 2 x i16>
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> [[X]], i1 true)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <vscale x 2 x i16> [[TMP1]], shufflevector (<vscale x 2 x i16> insertelement (<vscale x 2 x i16> undef, i16 47, i32 0), <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    call void @use1(<vscale x 2 x i63> [[Z]])
 ; CHECK-NEXT:    ret <vscale x 2 x i16> [[ZZ]]
 ;
@@ -81,3 +84,39 @@
   call void @use1(<vscale x 2 x i63> %z)
   ret <vscale x 2 x i16> %zz
 }
+
+; Negative case where types of x and zz don't match
+
+define i16 @trunc_ctlz_zext_i10_i32(i10 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i10_i32(
+; CHECK-NEXT:    [[Z:%.*]] = zext i10 [[X:%.*]] to i32
+; CHECK-NEXT:    [[P:%.*]] = call i32 @llvm.ctlz.i32(i32 [[Z]], i1 false), !range [[RNG1:![0-9]+]]
+; CHECK-NEXT:    [[ZZ:%.*]] = trunc i32 [[P]] to i16
+; CHECK-NEXT:    ret i16 [[ZZ]]
+;
+  %z = zext i10 %x to i32
+  %p = call i32 @llvm.ctlz.i32(i32 %z, i1 false)
+  %zz = trunc i32 %p to i16
+  ret i16 %zz
+}
+
+; Test width difference of more than log2 between x and t
+; TODO: Enable the opt for this case if it is proved that the
+; opt works for all combinations of bitwidth of zext src and dst.
+; Refer : https://reviews.llvm.org/D103788
+
+define i3 @trunc_ctlz_zext_i3_i34(i3 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i3_i34(
+; CHECK-NEXT:    [[Z:%.*]] = zext i3 [[X:%.*]] to i34
+; CHECK-NEXT:    [[P:%.*]] = call i34 @llvm.ctlz.i34(i34 [[Z]], i1 false), !range [[RNG2:![0-9]+]]
+; CHECK-NEXT:    [[T:%.*]] = trunc i34 [[P]] to i3
+; CHECK-NEXT:    ret i3 [[T]]
+;
+  %z = zext i3 %x to i34
+  %p = call i34 @llvm.ctlz.i34(i34 %z, i1 false)
+  %t = trunc i34 %p to i3
+  ret i3 %t
+}
+
+