Index: llvm/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -259,6 +259,7 @@
     setOperationAction(ISD::UMAX, VT, Legal);
     setOperationAction(ISD::ABS, VT, Legal);
     setOperationAction(ISD::SETCC, VT, Custom);
+    setOperationAction(ISD::CTLZ, VT, Legal);
 
     // No native support for these.
     setOperationAction(ISD::UDIV, VT, Expand);
Index: llvm/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -1724,6 +1724,16 @@
 def MVE_VCLZs16 : MVE_VCLSCLZ<"vclz", "i16", 0b01, 0b1>;
 def MVE_VCLZs32 : MVE_VCLSCLZ<"vclz", "i32", 0b10, 0b1>;
 
+// Select the generic ctlz node to the MVE VCLZ instructions.
+let Predicates = [HasMVEInt] in {
+  def : Pat<(v16i8 (ctlz (v16i8 MQPR:$val1))),
+            (v16i8 (MVE_VCLZs8 (v16i8 MQPR:$val1)))>;
+  def : Pat<(v8i16 (ctlz (v8i16 MQPR:$val1))),
+            (v8i16 (MVE_VCLZs16 (v8i16 MQPR:$val1)))>;
+  def : Pat<(v4i32 (ctlz (v4i32 MQPR:$val1))),
+            (v4i32 (MVE_VCLZs32 (v4i32 MQPR:$val1)))>;
+}
+
 class MVE_VABSNEG_int<string iname, string suffix, bits<2> size, bit negate,
                       list<dag> pattern=[]>
   : MVEIntSingleSrc<iname, suffix, size, pattern> {
Index: llvm/test/CodeGen/Thumb2/mve-ctlz.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/mve-ctlz.ll
@@ -0,0 +1,148 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -verify-machineinstrs -mattr=+mve %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <2 x i64> @ctlz_2i64_0_t(<2 x i64> %src){
+; CHECK-LABEL: ctlz_2i64_0_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov q1, q0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    clz r2, r2
+; CHECK-NEXT:    mov r1, r0
+; CHECK-NEXT:    add.w r2, r2, #32
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    clzne r2, r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov s2, r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r1, r0
+; CHECK-NEXT:    clz r2, r2
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #1
+; CHECK-NEXT:    adds r2, #32
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    clzne r2, r0
+; CHECK-NEXT:    vmov s0, r2
+; CHECK-NEXT:    vldr s1, .LCPI0_0
+; CHECK-NEXT:    vmov.f32 s3, s1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 0 @ float 0
+entry:
+  %0 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %src, i1 0)
+  ret <2 x i64> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ctlz_4i32_0_t(<4 x i32> %src){
+; CHECK-LABEL: ctlz_4i32_0_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %src, i1 0)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ctlz_8i16_0_t(<8 x i16> %src){
+; CHECK-LABEL: ctlz_8i16_0_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %src, i1 0)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ctlz_16i8_0_t(<16 x i8> %src){
+; CHECK-LABEL: ctlz_16i8_0_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %src, i1 0)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <2 x i64> @ctlz_2i64_1_t(<2 x i64> %src){
+; CHECK-LABEL: ctlz_2i64_1_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov q1, q0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    clz r2, r2
+; CHECK-NEXT:    mov r1, r0
+; CHECK-NEXT:    add.w r2, r2, #32
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    clzne r2, r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov s2, r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r1, r0
+; CHECK-NEXT:    clz r2, r2
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #1
+; CHECK-NEXT:    adds r2, #32
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    clzne r2, r0
+; CHECK-NEXT:    vmov s0, r2
+; CHECK-NEXT:    vldr s1, .LCPI4_0
+; CHECK-NEXT:    vmov.f32 s3, s1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI4_0:
+; CHECK-NEXT:    .long 0 @ float 0
+entry:
+  %0 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %src, i1 1)
+  ret <2 x i64> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ctlz_4i32_1_t(<4 x i32> %src){
+; CHECK-LABEL: ctlz_4i32_1_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %src, i1 1)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ctlz_8i16_1_t(<8 x i16> %src){
+; CHECK-LABEL: ctlz_8i16_1_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %src, i1 1)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ctlz_16i8_1_t(<16 x i8> %src){
+; CHECK-LABEL: ctlz_16i8_1_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %src, i1 1)
+  ret <16 x i8> %0
+}
+
+
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1)
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)