diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -533,6 +533,10 @@
       setOperationAction(ISD::ROTL, VT, Expand);
       setOperationAction(ISD::ROTR, VT, Expand);
 
+      setOperationAction(ISD::CTTZ, VT, Expand);
+      setOperationAction(ISD::CTLZ, VT, Expand);
+      setOperationAction(ISD::CTPOP, VT, Expand);
+
       // Custom-lower extensions and truncations from/to mask types.
       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -0,0 +1,3017 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: ctlz_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 4
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vsub.vv v8, v8, v9
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v9, v8, a0
+; CHECK-NEXT:    vsrl.vi v8, v8, 2
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 4
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.ctlz.nxv1i8(<vscale x 1 x i8> %va, i1 false)
+  ret <vscale x 1 x i8> %a
+}
+declare <vscale x 1 x i8> @llvm.ctlz.nxv1i8(<vscale x 1 x i8>, i1)
+
+define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: ctlz_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 4
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vsub.vv v8, v8, v9
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v9, v8, a0
+; CHECK-NEXT:    vsrl.vi v8, v8, 2
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 4
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.ctlz.nxv2i8(<vscale x 2 x i8> %va, i1 false)
+  ret <vscale x 2 x i8> %a
+}
+declare <vscale x 2 x i8> @llvm.ctlz.nxv2i8(<vscale x 2 x i8>, i1)
+
+define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: ctlz_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 4
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vsub.vv v8, v8, v9
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v9, v8, a0
+; CHECK-NEXT:    vsrl.vi v8, v8, 2
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v9, v8
+;
CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv4i8( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv4i8(, i1) + +define @ctlz_nxv8i8( %va) { +; CHECK-LABEL: ctlz_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv8i8( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv8i8(, i1) + +define @ctlz_nxv16i8( %va) { +; CHECK-LABEL: ctlz_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: vsrl.vi v10, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v10, v10, a0 +; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v10, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v10, v8 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv16i8( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv16i8(, i1) + +define @ctlz_nxv32i8( %va) { +; CHECK-LABEL: ctlz_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: vsrl.vi v12, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v12, v12, a0 +; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v12, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v12, v8 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv32i8( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv32i8(, i1) + +define @ctlz_nxv64i8( %va) { +; CHECK-LABEL: ctlz_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: vsrl.vi v16, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v16, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v16, v8 +; CHECK-NEXT: 
vsrl.vi v16, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv64i8( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv64i8(, i1) + +define @ctlz_nxv1i16( %va) { +; RV32-LABEL: ctlz_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv1i16( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv1i16(, i1) + +define @ctlz_nxv2i16( %va) { +; RV32-LABEL: ctlz_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; 
RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv2i16( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv2i16(, i1) + +define @ctlz_nxv4i16( %va) { +; RV32-LABEL: ctlz_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv4i16( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv4i16(, i1) + +define @ctlz_nxv8i16( %va) { +; RV32-LABEL: ctlz_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, 
v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv8i16( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv8i16(, i1) + +define @ctlz_nxv16i16( %va) { +; RV32-LABEL: ctlz_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, 
a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv16i16( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv16i16(, i1) + +define @ctlz_nxv32i16( %va) { +; RV32-LABEL: ctlz_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv32i16( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv32i16(, i1) + +define @ctlz_nxv1i32( %va) { +; RV32-LABEL: ctlz_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv1i32: +; RV64: # %bb.0: +; 
RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv1i32( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv1i32(, i1) + +define @ctlz_nxv2i32( %va) { +; RV32-LABEL: ctlz_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv2i32( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv2i32(, i1) + +define @ctlz_nxv4i32( %va) { +; RV32-LABEL: ctlz_nxv4i32: +; RV32: # 
%bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv4i32( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv4i32(, i1) + +define @ctlz_nxv8i32( %va) { +; RV32-LABEL: ctlz_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV64-NEXT: 
vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv8i32( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv8i32(, i1) + +define @ctlz_nxv16i32( %va) { +; RV32-LABEL: ctlz_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv16i32( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv16i32(, i1) + +define @ctlz_nxv1i64( %va) { +; RV32-LABEL: ctlz_nxv1i64: +; RV32: # %bb.0: 
+; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: vsrl.vx v9, v8, a0 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 1 +; RV32-NEXT: vand.vv v9, v11, v9 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: vand.vv v9, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v11 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi 
a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv1i64( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv1i64(, i1) + +define @ctlz_nxv2i64( %va) { +; RV32-LABEL: ctlz_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: vsrl.vx v10, v8, a0 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 1 +; RV32-NEXT: vand.vv v10, v14, v10 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: vand.vv v10, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v14 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: 
addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv2i64( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv2i64(, i1) + +define @ctlz_nxv4i64( %va) { +; RV32-LABEL: ctlz_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: vsrl.vx v12, v8, a0 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 1 +; RV32-NEXT: vand.vv v12, v20, v12 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: vand.vv v12, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v20 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, 
a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv4i64( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv4i64(, i1) + +define @ctlz_nxv8i64( %va) { +; RV32-LABEL: ctlz_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: vsrl.vx v16, v8, a0 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 1 +; RV32-NEXT: vand.vv v16, v0, v16 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vand.vv v16, v8, v24 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v24 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v0 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vmul.vv v8, v8, v24 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 
+; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv8i64( %va, i1 false) + ret %a +} +declare @llvm.ctlz.nxv8i64(, i1) + +define @ctlz_zero_undef_nxv1i8( %va) { +; CHECK-LABEL: ctlz_zero_undef_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv1i8( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv2i8( %va) { +; CHECK-LABEL: ctlz_zero_undef_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv2i8( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv4i8( %va) { +; CHECK-LABEL: ctlz_zero_undef_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; 
CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv4i8( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv8i8( %va) { +; CHECK-LABEL: ctlz_zero_undef_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv8i8( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv16i8( %va) { +; CHECK-LABEL: ctlz_zero_undef_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: vsrl.vi v10, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v10, v10, a0 +; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v10, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v10, v8 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv16i8( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv32i8( %va) { +; CHECK-LABEL: ctlz_zero_undef_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: vsrl.vi v12, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v12, v12, a0 +; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v12, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v12, v8 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv32i8( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv64i8( %va) { +; CHECK-LABEL: ctlz_zero_undef_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: vsrl.vi v16, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v16, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; 
CHECK-NEXT: vadd.vv v8, v16, v8 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctlz.nxv64i8( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv1i16( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv1i16( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv2i16( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 
+; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv2i16( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv4i16( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv4i16( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv8i16( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; 
RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv8i16( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv16i16( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, 
-241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv16i16( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv32i16( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv32i16( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv1i32( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: 
ctlz_zero_undef_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv1i32( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv2i32( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv2i32( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv4i32( %va) { +; RV32-LABEL: 
ctlz_zero_undef_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv4i32( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv8i32( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: 
vsetvli a0, zero, e32, m4, ta, mu +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv8i32( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv16i32( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv16i32( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv1i64( %va) { +; RV32-LABEL: 
ctlz_zero_undef_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: vsrl.vx v9, v8, a0 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 1 +; RV32-NEXT: vand.vv v9, v11, v9 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: vand.vv v9, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v11 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, 
a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv1i64( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv2i64( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: vsrl.vx v10, v8, a0 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 1 +; RV32-NEXT: vand.vv v10, v14, v10 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: vand.vv v10, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v14 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, 
a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv2i64( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv4i64( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: vsrl.vx v12, v8, a0 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 1 +; RV32-NEXT: vand.vv v12, v20, v12 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: vand.vv v12, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v20 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; 
RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv4i64( %va, i1 true) + ret %a +} + +define @ctlz_zero_undef_nxv8i64( %va) { +; RV32-LABEL: ctlz_zero_undef_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: vsrl.vx v16, v8, a0 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 1 +; RV32-NEXT: vand.vv v16, v0, v16 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vand.vv v16, v8, v24 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v24 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v0 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vmul.vv v8, v8, v24 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctlz_zero_undef_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; 
RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctlz.nxv8i64( %va, i1 true) + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll @@ -0,0 +1,1142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +define @ctpop_nxv1i8( %va) { +; CHECK-LABEL: ctpop_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctpop.nxv1i8( %va) + ret %a +} +declare @llvm.ctpop.nxv1i8() + +define @ctpop_nxv2i8( %va) { +; CHECK-LABEL: ctpop_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctpop.nxv2i8( %va) + ret %a +} +declare @llvm.ctpop.nxv2i8() + +define @ctpop_nxv4i8( %va) { +; CHECK-LABEL: ctpop_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, 
v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctpop.nxv4i8( %va) + ret %a +} +declare @llvm.ctpop.nxv4i8() + +define @ctpop_nxv8i8( %va) { +; CHECK-LABEL: ctpop_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctpop.nxv8i8( %va) + ret %a +} +declare @llvm.ctpop.nxv8i8() + +define @ctpop_nxv16i8( %va) { +; CHECK-LABEL: ctpop_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v10, v10, a0 +; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v10, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v10, v8 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctpop.nxv16i8( %va) + ret %a +} +declare @llvm.ctpop.nxv16i8() + +define @ctpop_nxv32i8( %va) { +; CHECK-LABEL: ctpop_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v12, v12, a0 +; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v12, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v12, v8 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctpop.nxv32i8( %va) + ret %a +} +declare @llvm.ctpop.nxv32i8() + +define @ctpop_nxv64i8( %va) { +; CHECK-LABEL: ctpop_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v16, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v16, v8 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.ctpop.nxv64i8( %va) + ret %a +} +declare @llvm.ctpop.nxv64i8() + +define @ctpop_nxv1i16( %va) { +; RV32-LABEL: ctpop_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64-NEXT: vsrl.vi 
v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv1i16( %va) + ret %a +} +declare @llvm.ctpop.nxv1i16() + +define @ctpop_nxv2i16( %va) { +; RV32-LABEL: ctpop_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv2i16( %va) + ret %a +} +declare @llvm.ctpop.nxv2i16() + +define @ctpop_nxv4i16( %va) { +; RV32-LABEL: ctpop_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: 
addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv4i16( %va) + ret %a +} +declare @llvm.ctpop.nxv4i16() + +define @ctpop_nxv8i16( %va) { +; RV32-LABEL: ctpop_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv8i16( %va) + ret %a +} +declare @llvm.ctpop.nxv8i16() + +define @ctpop_nxv16i16( %va) { +; RV32-LABEL: ctpop_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv16i16( %va) + ret %a +} +declare @llvm.ctpop.nxv16i16() + +define @ctpop_nxv32i16( %va) { +; RV32-LABEL: ctpop_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: 
vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv32i16( %va) + ret %a +} +declare @llvm.ctpop.nxv32i16() + +define @ctpop_nxv1i32( %va) { +; RV32-LABEL: ctpop_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv1i32( %va) + ret %a +} +declare @llvm.ctpop.nxv1i32() + +define @ctpop_nxv2i32( %va) { +; RV32-LABEL: ctpop_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui 
a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv2i32( %va) + ret %a +} +declare @llvm.ctpop.nxv2i32() + +define @ctpop_nxv4i32( %va) { +; RV32-LABEL: ctpop_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv4i32( %va) + ret %a +} +declare @llvm.ctpop.nxv4i32() + +define @ctpop_nxv8i32( %va) { +; RV32-LABEL: ctpop_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw 
a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv8i32( %va) + ret %a +} +declare @llvm.ctpop.nxv8i32() + +define @ctpop_nxv16i32( %va) { +; RV32-LABEL: ctpop_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv16i32( %va) + ret %a +} +declare @llvm.ctpop.nxv16i32() + +define @ctpop_nxv1i64( %va) { +; RV32-LABEL: ctpop_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 1 +; RV32-NEXT: vand.vv v9, v11, v9 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: vand.vv v9, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v11 +; RV32-NEXT: vand.vv v8, v8, v9 
+; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv1i64( %va) + ret %a +} +declare @llvm.ctpop.nxv1i64() + +define @ctpop_nxv2i64( %va) { +; RV32-LABEL: ctpop_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 1 +; RV32-NEXT: vand.vv v10, v14, v10 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: vand.vv v10, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v14 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; 
RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv2i64( %va) + ret %a +} +declare @llvm.ctpop.nxv2i64() + +define @ctpop_nxv4i64( %va) { +; RV32-LABEL: ctpop_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 1 +; RV32-NEXT: vand.vv v12, v20, v12 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: vand.vv v12, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v20 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; 
RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv4i64( %va) + ret %a +} +declare @llvm.ctpop.nxv4i64() + +define @ctpop_nxv8i64( %va) { +; RV32-LABEL: ctpop_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 1 +; RV32-NEXT: vand.vv v16, v0, v16 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vand.vv v16, v8, v24 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v24 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v0 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vmul.vv v8, v8, v24 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: ctpop_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.ctpop.nxv8i64( %va) + ret %a +} +declare @llvm.ctpop.nxv8i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll @@ -0,0 +1,2555 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +define @cttz_nxv1i8( %va) { +; CHECK-LABEL: cttz_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsub.vx v9, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv1i8( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv1i8(, i1) + +define @cttz_nxv2i8( %va) { +; CHECK-LABEL: cttz_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsub.vx v9, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv2i8( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv2i8(, i1) + +define @cttz_nxv4i8( %va) { +; CHECK-LABEL: cttz_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsub.vx v9, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv4i8( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv4i8(, i1) + +define @cttz_nxv8i8( %va) { +; CHECK-LABEL: cttz_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsub.vx v9, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv8i8( %va, i1 false) + ret %a +} +declare 
@llvm.cttz.nxv8i8(, i1) + +define @cttz_nxv16i8( %va) { +; CHECK-LABEL: cttz_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsub.vx v10, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v10, v10, a0 +; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v10, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v10, v8 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv16i8( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv16i8(, i1) + +define @cttz_nxv32i8( %va) { +; CHECK-LABEL: cttz_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsub.vx v12, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v12, v12, a0 +; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v12, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v12, v8 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv32i8( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv32i8(, i1) + +define @cttz_nxv64i8( %va) { +; CHECK-LABEL: cttz_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsub.vx v16, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v16, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v16, v8 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv64i8( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv64i8(, i1) + +define @cttz_nxv1i16( %va) { +; RV32-LABEL: cttz_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: 
addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv1i16( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv1i16(, i1) + +define @cttz_nxv2i16( %va) { +; RV32-LABEL: cttz_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv2i16( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv2i16(, i1) + +define @cttz_nxv4i16( %va) { +; RV32-LABEL: cttz_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: 
vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv4i16( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv4i16(, i1) + +define @cttz_nxv8i16( %va) { +; RV32-LABEL: cttz_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32-NEXT: vsub.vx v10, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV64-NEXT: vsub.vx v10, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv8i16( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv8i16(, i1) + +define @cttz_nxv16i16( %va) { +; RV32-LABEL: cttz_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32-NEXT: vsub.vx v12, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64-NEXT: vsub.vx v12, v8, 
a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv16i16( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv16i16(, i1) + +define @cttz_nxv32i16( %va) { +; RV32-LABEL: cttz_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-NEXT: vsub.vx v16, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-NEXT: vsub.vx v16, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv32i16( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv32i16(, i1) + +define @cttz_nxv1i32( %va) { +; RV32-LABEL: cttz_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv1i32: +; 
RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv1i32( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv1i32(, i1) + +define @cttz_nxv2i32( %va) { +; RV32-LABEL: cttz_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv2i32( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv2i32(, i1) + +define @cttz_nxv4i32( %va) { +; RV32-LABEL: cttz_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vsub.vx v10, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi 
a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64-NEXT: vsub.vx v10, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv4i32( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv4i32(, i1) + +define @cttz_nxv8i32( %va) { +; RV32-LABEL: cttz_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsub.vx v12, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64-NEXT: vsub.vx v12, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv8i32( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv8i32(, i1) + +define @cttz_nxv16i32( %va) { +; RV32-LABEL: cttz_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsub.vx v16, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 
819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vsub.vx v16, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv16i32( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv16i32(, i1) + +define @cttz_nxv1i64( %va) { +; RV32-LABEL: cttz_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 1 +; RV32-NEXT: vand.vv v10, v11, v10 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: vand.vv v10, v8, v9 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v11 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui 
a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv1i64( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv1i64(, i1) + +define @cttz_nxv2i64( %va) { +; RV32-LABEL: cttz_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsub.vx v10, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 1 +; RV32-NEXT: vand.vv v12, v14, v12 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: vand.vv v12, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v14 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsub.vx v10, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 
+; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv2i64( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv2i64(, i1) + +define @cttz_nxv4i64( %va) { +; RV32-LABEL: cttz_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsub.vx v12, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 1 +; RV32-NEXT: vand.vv v16, v20, v16 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vand.vv v16, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v20 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsub.vx v12, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; 
RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv4i64( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv4i64(, i1) + +define @cttz_nxv8i64( %va) { +; RV32-LABEL: cttz_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsub.vx v16, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 1 +; RV32-NEXT: vand.vv v24, v0, v24 +; RV32-NEXT: vsub.vv v8, v8, v24 +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v0 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vmul.vv v8, v8, v24 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsub.vx v16, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: 
vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv8i64( %va, i1 false) + ret %a +} +declare @llvm.cttz.nxv8i64(, i1) + +define @cttz_zero_undef_nxv1i8( %va) { +; CHECK-LABEL: cttz_zero_undef_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsub.vx v9, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv1i8( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv2i8( %va) { +; CHECK-LABEL: cttz_zero_undef_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsub.vx v9, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv2i8( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv4i8( %va) { +; CHECK-LABEL: cttz_zero_undef_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsub.vx v9, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv4i8( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv8i8( %va) { +; CHECK-LABEL: cttz_zero_undef_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsub.vx v9, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv8i8( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv16i8( %va) { +; CHECK-LABEL: cttz_zero_undef_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsub.vx v10, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: 
vand.vx v10, v10, a0 +; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v10, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v10, v8 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv16i8( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv32i8( %va) { +; CHECK-LABEL: cttz_zero_undef_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsub.vx v12, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v12, v12, a0 +; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v12, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v12, v8 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv32i8( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv64i8( %va) { +; CHECK-LABEL: cttz_zero_undef_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsub.vx v16, v8, a0 +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: vand.vx v16, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v16, v8 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %a = call @llvm.cttz.nxv64i8( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv1i16( %va) { +; RV32-LABEL: cttz_zero_undef_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; 
RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv1i16( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv2i16( %va) { +; RV32-LABEL: cttz_zero_undef_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv2i16( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv4i16( %va) { +; RV32-LABEL: cttz_zero_undef_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: 
lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv4i16( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv8i16( %va) { +; RV32-LABEL: cttz_zero_undef_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32-NEXT: vsub.vx v10, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV64-NEXT: vsub.vx v10, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv8i16( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv16i16( %va) { +; RV32-LABEL: cttz_zero_undef_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32-NEXT: vsub.vx v12, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64-NEXT: vsub.vx v12, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, 
v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv16i16( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv32i16( %va) { +; RV32-LABEL: cttz_zero_undef_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-NEXT: vsub.vx v16, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-NEXT: vsub.vx v16, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv32i16( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv1i32( %va) { +; RV32-LABEL: cttz_zero_undef_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 
+; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv1i32( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv2i32( %va) { +; RV32-LABEL: cttz_zero_undef_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv2i32( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv4i32( %va) { +; RV32-LABEL: cttz_zero_undef_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vsub.vx v10, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64-NEXT: vsub.vx v10, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v10 +; 
RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv4i32( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv8i32( %va) { +; RV32-LABEL: cttz_zero_undef_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsub.vx v12, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64-NEXT: vsub.vx v12, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv8i32( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv16i32( %va) { +; RV32-LABEL: cttz_zero_undef_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsub.vx v16, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: 
ret +; +; RV64-LABEL: cttz_zero_undef_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vsub.vx v16, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv16i32( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv1i64( %va) { +; RV32-LABEL: cttz_zero_undef_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsub.vx v9, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 1 +; RV32-NEXT: vand.vv v10, v11, v10 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: vand.vv v10, v8, v9 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v11, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v11 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsub.vx v9, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; 
RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv1i64( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv2i64( %va) { +; RV32-LABEL: cttz_zero_undef_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsub.vx v10, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 1 +; RV32-NEXT: vand.vv v12, v14, v12 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: vand.vv v12, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v14, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v14 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsub.vx v10, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw 
a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv2i64( %va, i1 true) + ret %a +} + +define @cttz_zero_undef_nxv4i64( %va) { +; RV32-LABEL: cttz_zero_undef_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsub.vx v12, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 1 +; RV32-NEXT: vand.vv v16, v20, v16 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vand.vv v16, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v20, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v20 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsub.vx v12, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv4i64( %va, i1 true) + ret %a +} + +define 
@cttz_zero_undef_nxv8i64( %va) { +; RV32-LABEL: cttz_zero_undef_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsub.vx v16, v8, a0 +; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 1 +; RV32-NEXT: vand.vv v24, v0, v24 +; RV32-NEXT: vsub.vv v8, v8, v24 +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsrl.vi v0, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v0 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vmul.vv v8, v8, v24 +; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: cttz_zero_undef_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsub.vx v16, v8, a0 +; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 21845 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 13107 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3855 +; RV64-NEXT: addiw a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, 241 +; RV64-NEXT: slli a0, a0, 12 +; RV64-NEXT: addi a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: slli a0, a0, 16 +; RV64-NEXT: addi a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call @llvm.cttz.nxv8i64( %va, i1 true) + ret %a +}
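
The cttz checks above all follow one expansion: the input is rewritten as (x - 1) & ~x, which sets exactly the bits below the lowest set bit, and that value is popcounted with the SWAR masks visible in the CHECK lines (the 0x5555…/0x3333…/0x0f0f… splats, a 0x0101… multiply, and a final shift of 8, 24, or 56 for i16/i32/i64 elements). A minimal scalar C++ sketch of that recipe for 32-bit elements follows; the function name cttz_expanded_u32 is introduced here purely for illustration and is not part of the patch.

#include <cstdint>

// Scalar model of the expansion the vector CHECK lines exercise:
// cttz(x) == popcount((x - 1) & ~x). For x == 0 the masked value is all
// ones, so the result is the element width (32 here); the i16 tests use the
// same masks truncated to 16 bits and a final shift of 8.
uint32_t cttz_expanded_u32(uint32_t x) {
  uint32_t v = (x - 1) & ~x;                          // ones below the lowest set bit
  v -= (v >> 1) & 0x55555555u;                        // 2-bit counts
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);   // 4-bit counts
  v = (v + (v >> 4)) & 0x0f0f0f0fu;                   // per-byte counts
  return (v * 0x01010101u) >> 24;                     // sum the bytes into the top byte
}

Each step maps one-for-one onto a vector op in the generated code: vsub.vx/vxor.vi/vand.vv form the mask, vsrl/vand/vsub/vadd accumulate the counts, and vmul.vx plus a final vsrl perform the horizontal byte sum. The zero_undef (i1 true) variants only relax the requirement for a zero input; the expansion above happens to produce the element width in that case anyway.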
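
For the i64 element tests, RV64 materializes the 64-bit masks in a scalar register with lui/addiw followed by slli/addi chains, while RV32 splats them by storing the repeating 32-bit pattern into two adjacent stack words and reloading with a stride-zero vlse64.v. A small compile-time check of the RV64 chains shown above, under the assumption that lui contributes imm << 12 and that addiw's 32-bit sign extension is a no-op for these particular values; the helper name chain12 is introduced here for illustration only.

#include <cstdint>

// Fold one lui/addiw value through three "slli a0, a0, 12; addi a0, a0, imm"
// steps, mirroring the RV64 constant materialization in the i64 bodies above.
constexpr uint64_t chain12(uint64_t lui, int64_t a, int64_t b, int64_t c, int64_t d) {
  uint64_t v = (lui << 12) + a;   // lui; addiw
  v = (v << 12) + b;              // slli 12; addi
  v = (v << 12) + c;              // slli 12; addi
  v = (v << 12) + d;              // slli 12; addi
  return v;
}

static_assert(chain12(21845, 1365, 1365, 1365, 1365) == 0x5555555555555555, "");
static_assert(chain12(13107,  819,  819,  819,  819) == 0x3333333333333333, "");
static_assert(chain12( 3855,  241, -241,  241, -241) == 0x0f0f0f0f0f0f0f0f, "");
// The 0x0101... multiplier is built with 16-bit shifts instead:
static_assert((((((4112ull << 12) + 257) << 16) + 257) << 16) + 257 == 0x0101010101010101, "");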