diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -836,7 +836,7 @@
     }
   }
 
-  Value *A;
+  Value *A, *B;
   Constant *C;
   if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
     unsigned AWidth = A->getType()->getScalarSizeInBits();
@@ -950,6 +950,16 @@
     }
   }
 
+  // trunc (ctlz (zext A), B) --> add (ctlz A, B), (SrcWidth - DestWidth)
+  if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_ZExt(m_Value(A)),
+                                                        m_Value(B)))) &&
+      A->getType() == Trunc.getType()) {
+    Value *WidthDiff =
+        ConstantInt::get(A->getType(), Src->getType()->getScalarSizeInBits() -
+                                           A->getType()->getScalarSizeInBits());
+    Value *NarrowCtlz = Builder.CreateIntrinsic(Intrinsic::ctlz, {Trunc.getType()}, {A, B});
+    return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
+  }
   return nullptr;
 }
 
diff --git a/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll b/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll
@@ -0,0 +1,254 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare i8 @llvm.ctlz.i8 (i8, i1)
+declare i16 @llvm.ctlz.i16 (i16, i1)
+declare i32 @llvm.ctlz.i32 (i32, i1)
+declare i64 @llvm.ctlz.i64 (i64, i1)
+declare <2 x i8> @llvm.ctlz.v2i8 (<2 x i8>, i1)
+declare <2 x i16> @llvm.ctlz.v2i16 (<2 x i16>, i1)
+declare <2 x i32> @llvm.ctlz.v2i32 (<2 x i32>, i1)
+declare <2 x i64> @llvm.ctlz.v2i64 (<2 x i64>, i1)
+declare <vscale x 2 x i8> @llvm.ctlz.nxv2i8 (<vscale x 2 x i8>, i1)
+declare <vscale x 2 x i16> @llvm.ctlz.nxv2i16 (<vscale x 2 x i16>, i1)
+declare <vscale x 2 x i32> @llvm.ctlz.nxv2i32 (<vscale x 2 x i32>, i1)
+declare <vscale x 2 x i64> @llvm.ctlz.nxv2i64 (<vscale x 2 x i64>, i1)
+
+define i16 @trunc_ctlz_zext_i16_i32(i16 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i16_i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0:![0-9]+]]
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw i16 [[TMP1]], 16
+; CHECK-NEXT:    ret i16 [[ZZ]]
+;
+  %z = zext i16 %x to i32
+  %p = call i32 @llvm.ctlz.i32(i32 %z, i1 false)
+  %zz = trunc i32 %p to i16
+  ret i16 %zz
+}
+
+define i8 @trunc_ctlz_zext_i8_i32(i8 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i8_i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ctlz.i8(i8 [[X:%.*]], i1 false), !range [[RNG1:![0-9]+]]
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw i8 [[TMP1]], 24
+; CHECK-NEXT:    ret i8 [[ZZ]]
+;
+  %z = zext i8 %x to i32
+  %p = call i32 @llvm.ctlz.i32(i32 %z, i1 false)
+  %zz = trunc i32 %p to i8
+  ret i8 %zz
+}
+
+define i8 @trunc_ctlz_zext_i8_i64(i8 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i8_i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ctlz.i8(i8 [[X:%.*]], i1 false), !range [[RNG1]]
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw i8 [[TMP1]], 56
+; CHECK-NEXT:    ret i8 [[ZZ]]
+;
+  %z = zext i8 %x to i64
+  %p = call i64 @llvm.ctlz.i64(i64 %z, i1 false)
+  %zz = trunc i64 %p to i8
+  ret i8 %zz
+}
+
+define i32 @trunc_ctlz_zext_i32_i64(i32 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i32_i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range [[RNG2:![0-9]+]]
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw i32 [[TMP1]], 32
+; CHECK-NEXT:    ret i32 [[ZZ]]
+;
+  %z = zext i32 %x to i64
+  %p = call i64 @llvm.ctlz.i64(i64 %z, i1 false)
+  %zz = trunc i64 %p to i32
+  ret i32 %zz
+}
+
+define i16 @trunc_ctlz_zext_i16_i64(i16 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i16_i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0]]
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw i16 [[TMP1]], 48
+; CHECK-NEXT:    ret i16 [[ZZ]]
+;
+  %z = zext i16 %x to i64
+  %p = call i64 @llvm.ctlz.i64(i64 %z, i1 false)
+  %zz = trunc i64 %p to i16
+  ret i16 %zz
+}
+
+; Fixed vector cases
+
+define <2 x i16> @trunc_ctlz_zext_v2i16_v2i32(<2 x i16> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_v2i16_v2i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <2 x i16> [[TMP1]], <i16 16, i16 16>
+; CHECK-NEXT:    ret <2 x i16> [[ZZ]]
+;
+  %z = zext <2 x i16> %x to <2 x i32>
+  %p = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %z, i1 false)
+  %zz = trunc <2 x i32> %p to <2 x i16>
+  ret <2 x i16> %zz
+}
+
+define <2 x i8> @trunc_ctlz_zext_v2i8_v2i32(<2 x i8> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_v2i8_v2i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <2 x i8> [[TMP1]], <i8 24, i8 24>
+; CHECK-NEXT:    ret <2 x i8> [[ZZ]]
+;
+  %z = zext <2 x i8> %x to <2 x i32>
+  %p = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %z, i1 false)
+  %zz = trunc <2 x i32> %p to <2 x i8>
+  ret <2 x i8> %zz
+}
+
+define <2 x i8> @trunc_ctlz_zext_v2i8_v2i64(<2 x i8> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_v2i8_v2i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <2 x i8> [[TMP1]], <i8 56, i8 56>
+; CHECK-NEXT:    ret <2 x i8> [[ZZ]]
+;
+  %z = zext <2 x i8> %x to <2 x i64>
+  %p = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %z, i1 false)
+  %zz = trunc <2 x i64> %p to <2 x i8>
+  ret <2 x i8> %zz
+}
+
+define <2 x i32> @trunc_ctlz_zext_v2i32_v2i64(<2 x i32> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_v2i32_v2i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <2 x i32> [[TMP1]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[ZZ]]
+;
+  %z = zext <2 x i32> %x to <2 x i64>
+  %p = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %z, i1 false)
+  %zz = trunc <2 x i64> %p to <2 x i32>
+  ret <2 x i32> %zz
+}
+
+define <2 x i16> @trunc_ctlz_zext_v2i16_v2i64(<2 x i16> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_v2i16_v2i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <2 x i16> [[TMP1]], <i16 48, i16 48>
+; CHECK-NEXT:    ret <2 x i16> [[ZZ]]
+;
+  %z = zext <2 x i16> %x to <2 x i64>
+  %p = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %z, i1 false)
+  %zz = trunc <2 x i64> %p to <2 x i16>
+  ret <2 x i16> %zz
+}
+
+; Scalable vector cases
+
+define <vscale x 2 x i16> @trunc_ctlz_zext_nxv2i16_nxv2i32(<vscale x 2 x i16> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_nxv2i16_nxv2i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <vscale x 2 x i16> [[TMP1]], shufflevector (<vscale x 2 x i16> insertelement (<vscale x 2 x i16> undef, i16 16, i32 0), <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 2 x i16> [[ZZ]]
+;
+  %z = zext <vscale x 2 x i16> %x to <vscale x 2 x i32>
+  %p = call <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32> %z, i1 false)
+  %zz = trunc <vscale x 2 x i32> %p to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %zz
+}
+
+define <vscale x 2 x i8> @trunc_ctlz_zext_nxv2i8_nxv2i32(<vscale x 2 x i8> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_nxv2i8_nxv2i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.ctlz.nxv2i8(<vscale x 2 x i8> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <vscale x 2 x i8> [[TMP1]], shufflevector (<vscale x 2 x i8> insertelement (<vscale x 2 x i8> undef, i8 24, i32 0), <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 2 x i8> [[ZZ]]
+;
+  %z = zext <vscale x 2 x i8> %x to <vscale x 2 x i32>
+  %p = call <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32> %z, i1 false)
+  %zz = trunc <vscale x 2 x i32> %p to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %zz
+}
+
+define <vscale x 2 x i8> @trunc_ctlz_zext_nxv2i8_nxv2i64(<vscale x 2 x i8> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_nxv2i8_nxv2i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.ctlz.nxv2i8(<vscale x 2 x i8> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <vscale x 2 x i8> [[TMP1]], shufflevector (<vscale x 2 x i8> insertelement (<vscale x 2 x i8> undef, i8 56, i32 0), <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 2 x i8> [[ZZ]]
+;
+  %z = zext <vscale x 2 x i8> %x to <vscale x 2 x i64>
+  %p = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> %z, i1 false)
+  %zz = trunc <vscale x 2 x i64> %p to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %zz
+}
+
+define <vscale x 2 x i32> @trunc_ctlz_zext_nxv2i32_nxv2i64(<vscale x 2 x i32> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_nxv2i32_nxv2i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <vscale x 2 x i32> [[TMP1]], shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> undef, i32 32, i32 0), <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 2 x i32> [[ZZ]]
+;
+  %z = zext <vscale x 2 x i32> %x to <vscale x 2 x i64>
+  %p = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> %z, i1 false)
+  %zz = trunc <vscale x 2 x i64> %p to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %zz
+}
+
+define <vscale x 2 x i16> @trunc_ctlz_zext_nxv2i16_nxv2i64(<vscale x 2 x i16> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_nxv2i16_nxv2i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <vscale x 2 x i16> [[TMP1]], shufflevector (<vscale x 2 x i16> insertelement (<vscale x 2 x i16> undef, i16 48, i32 0), <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 2 x i16> [[ZZ]]
+;
+  %z = zext <vscale x 2 x i16> %x to <vscale x 2 x i64>
+  %p = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> %z, i1 false)
+  %zz = trunc <vscale x 2 x i64> %p to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %zz
+}
+
+; Multiple uses of the ctlz, for which the transform is disabled
+
+define i16 @trunc_ctlz_zext_i16_i32_multiple_uses(i16 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i16_i32_multiple_uses(
+; CHECK-NEXT:    [[Z:%.*]] = zext i16 [[X:%.*]] to i32
+; CHECK-NEXT:    [[P:%.*]] = call i32 @llvm.ctlz.i32(i32 [[Z]], i1 false), !range [[RNG3:![0-9]+]]
+; CHECK-NEXT:    [[X1:%.*]] = trunc i32 [[P]] to i16
+; CHECK-NEXT:    [[Y:%.*]] = trunc i32 [[P]] to i16
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw i16 [[X1]], [[Y]]
+; CHECK-NEXT:    ret i16 [[ZZ]]
+;
+  %z = zext i16 %x to i32
+  %p = call i32 @llvm.ctlz.i32(i32 %z, i1 false)
+  %x1 = trunc i32 %p to i16
+  %y = trunc i32 %p to i16
+  %zz = add nsw i16 %x1, %y
+  ret i16 %zz
+}
+
+define <2 x i16> @trunc_ctlz_zext_v2i16_v2i32_multiple_uses(<2 x i16> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_v2i16_v2i32_multiple_uses(
+; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i16> [[X:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[P:%.*]] = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[Z]], i1 false)
+; CHECK-NEXT:    [[X1:%.*]] = trunc <2 x i32> [[P]] to <2 x i16>
+; CHECK-NEXT:    [[Y:%.*]] = trunc <2 x i32> [[P]] to <2 x i16>
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <2 x i16> [[X1]], [[Y]]
+; CHECK-NEXT:    ret <2 x i16> [[ZZ]]
+;
+  %z = zext <2 x i16> %x to <2 x i32>
+  %p = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %z, i1 false)
+  %x1 = trunc <2 x i32> %p to <2 x i16>
+  %y = trunc <2 x i32> %p to <2 x i16>
+  %zz = add nsw <2 x i16> %x1, %y
+  ret <2 x i16> %zz
+}
+
+define <vscale x 2 x i16> @trunc_ctlz_zext_nxv2i16_nxv2i32_multiple_uses(<vscale x 2 x i16> %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_nxv2i16_nxv2i32_multiple_uses(
+; CHECK-NEXT:    [[Z:%.*]] = zext <vscale x 2 x i16> [[X:%.*]] to <vscale x 2 x i32>
+; CHECK-NEXT:    [[P:%.*]] = call <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32> [[Z]], i1 false)
+; CHECK-NEXT:    [[X1:%.*]] = trunc <vscale x 2 x i32> [[P]] to <vscale x 2 x i16>
+; CHECK-NEXT:    [[Y:%.*]] = trunc <vscale x 2 x i32> [[P]] to <vscale x 2 x i16>
+; CHECK-NEXT:    [[ZZ:%.*]] = add nsw <vscale x 2 x i16> [[X1]], [[Y]]
+; CHECK-NEXT:    ret <vscale x 2 x i16> [[ZZ]]
+;
+  %z = zext <vscale x 2 x i16> %x to <vscale x 2 x i32>
+  %p = call <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32> %z, i1 false)
+  %x1 = trunc <vscale x 2 x i32> %p to <vscale x 2 x i16>
+  %y = trunc <vscale x 2 x i32> %p to <vscale x 2 x i16>
+  %zz = add nsw <vscale x 2 x i16> %x1, %y
+  ret <vscale x 2 x i16> %zz
+}
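+
+; Note on the expected add constants above: zero-extending from iN to iM
+; contributes exactly M - N guaranteed leading zero bits, so
+; trunc (ctlz (zext X)) is equivalent to ctlz(X) plus the width difference
+; (16, 24, 32, 48 or 56 in the positive tests above).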