Index: llvm/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -260,6 +260,7 @@
     setOperationAction(ISD::ABS, VT, Legal);
     setOperationAction(ISD::SETCC, VT, Custom);
     setOperationAction(ISD::CTLZ, VT, Legal);
+    setOperationAction(ISD::CTTZ, VT, Custom);
 
     // No native support for these.
     setOperationAction(ISD::UDIV, VT, Expand);
@@ -5738,9 +5739,7 @@
                          const ARMSubtarget *ST) {
   SDLoc dl(N);
   EVT VT = N->getValueType(0);
-  if (VT.isVector()) {
-    assert(ST->hasNEON());
-
+  if (VT.isVector() && ST->hasNEON()) {
     // Compute the least significant set bit: LSB = X & -X
     SDValue X = N->getOperand(0);
     SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
Index: llvm/test/CodeGen/Thumb2/mve-cttz.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/mve-cttz.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <4 x i32> @cttz_4i32_t(<4 x i32> %src){
+; CHECK-LABEL: cttz_4i32_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vbrsr.32 q0, q0, #0
+; CHECK-NEXT:    vclz.i32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %src, i1 0)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @cttz_8i16_t(<8 x i16> %src){
+; CHECK-LABEL: cttz_8i16_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vbrsr.16 q0, q0, #0
+; CHECK-NEXT:    vclz.i16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %src, i1 0)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @cttz_16i8_t(<16 x i8> %src){
+; CHECK-LABEL: cttz_16i8_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vbrsr.8 q0, q0, #0
+; CHECK-NEXT:    vclz.i8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %src, i1 0)
+  ret <16 x i8> %0
+}
+
+declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1)
+declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>, i1)
+declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>, i1)
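
Note (not part of the patch): the change leans on two equivalent formulations of cttz. The NEON path kept in LowerCTTZ isolates the lowest set bit with LSB = X & -X and derives its index from a leading-zero count, while the MVE output in the tests reverses the bits (VBRSR) and then counts leading zeros (VCLZ). A minimal scalar C++ sketch of both formulations follows; it assumes the GCC/Clang __builtin_clz/__builtin_ctz builtins, all names are illustrative, and none of it is code from the tree.

// Scalar sketch of the two cttz formulations above; illustrative only.
#include <cassert>
#include <cstdint>

// NEON-style: LSB = X & -X keeps only the least significant set bit,
// and the index of that bit is (width - 1) - clz(LSB).
static unsigned cttzViaLsb(uint32_t X) {
  assert(X != 0 && "zero input is handled separately by the real lowering");
  uint32_t LSB = X & -X; // unsigned negation is modular, so this is well defined
  return 31u - static_cast<unsigned>(__builtin_clz(LSB));
}

// MVE-style: reverse the bits (what VBRSR does per element), then count
// leading zeros (VCLZ); trailing zeros become leading zeros.
static uint32_t bitReverse(uint32_t X) {
  uint32_t R = 0;
  for (unsigned I = 0; I < 32; ++I)
    R |= ((X >> I) & 1u) << (31u - I);
  return R;
}

static unsigned cttzViaBitReverse(uint32_t X) {
  assert(X != 0 && "zero input is handled separately by the real lowering");
  return static_cast<unsigned>(__builtin_clz(bitReverse(X)));
}

int main() {
  // Both formulations agree with the builtin on a few spot checks.
  for (uint32_t X : {1u, 2u, 96u, 0x40000u, 0x80000000u}) {
    assert(cttzViaLsb(X) == cttzViaBitReverse(X));
    assert(cttzViaLsb(X) == static_cast<unsigned>(__builtin_ctz(X)));
  }
}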