diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -681,9 +681,6 @@
 
       setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
 
-      setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, VT, Expand);
-
-      setOperationAction(ISD::BSWAP, VT, Expand);
       setOperationAction({ISD::VP_BSWAP, ISD::VP_BITREVERSE}, VT, Expand);
       setOperationAction({ISD::VP_FSHL, ISD::VP_FSHR}, VT, Expand);
       setOperationAction({ISD::VP_CTLZ, ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ,
@@ -754,13 +751,20 @@
       // Splice
       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
 
-      // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if element of VT in the range
-      // of f32.
-      EVT FloatVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
-      if (isTypeLegal(FloatVT)) {
-        setOperationAction(
-            {ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
-            Custom);
+      if (Subtarget.hasStdExtZvbb()) {
+        setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, VT, Legal);
+      } else {
+        setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, VT, Expand);
+        setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, VT, Expand);
+
+        // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if element of VT in the
+        // range of f32.
+        EVT FloatVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+        if (isTypeLegal(FloatVT)) {
+          setOperationAction(
+              {ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
+              Custom);
+        }
       }
     }
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1942,6 +1942,8 @@
 include "RISCVInstrInfoZc.td"
 include "RISCVInstrInfoZk.td"
 include "RISCVInstrInfoV.td"
+include "RISCVInstrInfoZvfbf.td"
+include "RISCVInstrInfoZvk.td"
 include "RISCVInstrInfoZfa.td"
 include "RISCVInstrInfoZfbfmin.td"
 include "RISCVInstrInfoZfh.td"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -1785,6 +1785,4 @@
 }
 } // Predicates = [HasVInstructionsI64, IsRV64]
 
-include "RISCVInstrInfoZvfbf.td"
-include "RISCVInstrInfoZvk.td"
 include "RISCVInstrInfoVPseudos.td"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -177,3 +177,47 @@
 def VSM3C_VI : PALUVINoVm<0b101011, "vsm3c.vi", uimm5>;
 def VSM3ME_VV : PALUVVNoVm<0b100000, OPMVV, "vsm3me.vv">;
 } // Predicates = [HasStdExtZvksh]
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions
+//===----------------------------------------------------------------------===//
+
+multiclass VPseudoUnaryV_V {
+  foreach m = MxList in {
+    let VLMul = m.value in {
+      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
+      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
+                                   RISCVMaskedPseudo<MaskIdx=2>;
+    }
+  }
+}
+
+defm PseudoVBREV : VPseudoUnaryV_V;
+defm PseudoVREV8 : VPseudoUnaryV_V;
+defm PseudoVCLZ : VPseudoUnaryV_V;
+defm PseudoVCTZ : VPseudoUnaryV_V;
+defm PseudoVCPOP : VPseudoUnaryV_V;
+
+//===----------------------------------------------------------------------===//
+// SDNode patterns
+//===----------------------------------------------------------------------===//
+
+multiclass VPatUnarySDNode_V<SDPatternOperator op, string instruction_name> {
+  foreach vti = AllIntegerVectors in {
+    let Predicates = GetVTypePredicates<vti>.Predicates in {
+      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1))),
+                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX)
+                   (vti.Vector (IMPLICIT_DEF)),
+                   vti.RegClass:$rs1,
+                   vti.AVL, vti.Log2SEW, TA_MA)>;
+    }
+  }
+}
+
+let Predicates = [HasStdExtZvbb] in {
+  defm : VPatUnarySDNode_V<bitreverse, "PseudoVBREV">;
+  defm : VPatUnarySDNode_V<bswap, "PseudoVREV8">;
+  defm : VPatUnarySDNode_V<ctlz, "PseudoVCLZ">;
+  defm : VPatUnarySDNode_V<cttz, "PseudoVCTZ">;
+  defm : VPatUnarySDNode_V<ctpop, "PseudoVCPOP">;
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
 
 define <vscale x 1 x i8> @bitreverse_nxv1i8(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: bitreverse_nxv1i8:
@@ -24,6 +26,12 @@
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: bitreverse_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
+; CHECK-ZVBB-NEXT:    ret
   %a = call <vscale x 1 x i8> @llvm.bitreverse.nxv1i8(<vscale x 1 x i8> %va)
   ret <vscale x 1 x i8> %a
 }
@@ -51,6 +59,12 @@
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: bitreverse_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
+; CHECK-ZVBB-NEXT:    ret
   %a = call <vscale x 2 x i8> @llvm.bitreverse.nxv2i8(<vscale x 2 x i8> %va)
   ret <vscale x 2 x i8> %a
 }
@@ -78,6 +92,12 @@
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: bitreverse_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
+; CHECK-ZVBB-NEXT:    ret
   %a = call <vscale x 4 x i8> @llvm.bitreverse.nxv4i8(<vscale x 4 x i8> %va)
   ret <vscale x 4 x i8> %a
 }
@@ -105,6 +125,12 @@
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: bitreverse_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
+; CHECK-ZVBB-NEXT:    ret
   %a = call <vscale x 8 x i8> @llvm.bitreverse.nxv8i8(<vscale x 8 x i8> %va)
   ret <vscale x 8 x i8> %a
 }
@@ -132,6 +158,12 @@
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v10, v8
 ; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: bitreverse_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
+; CHECK-ZVBB-NEXT:    ret
   %a = call <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8> %va)
   ret <vscale x 16 x i8> %a
 }
@@ -159,6 +191,12 @@
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v12, v8
 ; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: bitreverse_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
+; CHECK-ZVBB-NEXT:    ret
   %a = call <vscale x 32 x i8> @llvm.bitreverse.nxv32i8(<vscale x 32 x i8> %va)
   ret <vscale x 32 x i8> %a
 }
@@ -186,6 +224,12 @@
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v16, v8
 ; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: bitreverse_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
+;
CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv64i8( %va) ret %a } @@ -249,6 +293,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v9, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv1i16( %va) ret %a } @@ -312,6 +362,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v9, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv2i16( %va) ret %a } @@ -375,6 +431,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v9, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv4i16( %va) ret %a } @@ -438,6 +500,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v10, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv8i16( %va) ret %a } @@ -501,6 +569,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v12, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv16i16( %va) ret %a } @@ -564,6 +638,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv32i16( %va) ret %a } @@ -643,6 +723,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v9, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv1i32( %va) ret %a } @@ -722,6 +808,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v9, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv2i32( %va) ret %a } @@ -801,6 +893,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v10, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv4i32( %va) ret %a } @@ -880,6 +978,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v12, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv8i32( %va) ret %a } @@ -959,6 +1063,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv16i32: +; 
CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv16i32( %va) ret %a } @@ -1096,6 +1206,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v9, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv1i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv1i64( %va) ret %a } @@ -1233,6 +1349,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v10, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv2i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv2i64( %va) ret %a } @@ -1370,6 +1492,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v12, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv4i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv4i64( %va) ret %a } @@ -1521,6 +1649,12 @@ ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bitreverse_nxv8i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-ZVBB-NEXT: vbrev.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bitreverse.nxv8i64( %va) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 +; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB +; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB define @bswap_nxv1i16( %va) { ; CHECK-LABEL: bswap_nxv1i16: @@ -10,6 +12,12 @@ ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv1i16( %va) ret %a } @@ -23,6 +31,12 @@ ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv2i16( %va) ret %a } @@ -36,6 +50,12 @@ ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv4i16( %va) ret %a } @@ -49,6 +69,12 @@ ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; 
CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv8i16( %va) ret %a } @@ -62,6 +88,12 @@ ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv16i16( %va) ret %a } @@ -75,6 +107,12 @@ ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv32i16( %va) ret %a } @@ -112,6 +150,12 @@ ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vor.vv v8, v8, v9 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv1i32( %va) ret %a } @@ -149,6 +193,12 @@ ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vor.vv v8, v8, v9 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv2i32( %va) ret %a } @@ -186,6 +236,12 @@ ; RV64-NEXT: vor.vv v8, v8, v12 ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv4i32( %va) ret %a } @@ -223,6 +279,12 @@ ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vor.vv v8, v8, v12 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv8i32( %va) ret %a } @@ -260,6 +322,12 @@ ; RV64-NEXT: vor.vv v8, v8, v24 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv16i32( %va) ret %a } @@ -337,6 +405,12 @@ ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vor.vv v8, v9, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv1i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv1i64( %va) ret %a } @@ -414,6 +488,12 @@ ; RV64-NEXT: vor.vv v8, v8, v12 ; RV64-NEXT: vor.vv v8, v10, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv2i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv2i64( %va) ret %a } @@ -491,6 +571,12 @@ ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vor.vv v8, v12, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv4i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv4i64( %va) ret %a } @@ -582,6 +668,12 @@ ; RV64-NEXT: vor.vv v8, v8, v24 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: bswap_nxv8i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m8, ta, ma 
+; CHECK-ZVBB-NEXT: vrev8.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.bswap.nxv8i64( %va) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll @@ -5,6 +5,8 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64f,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-F,RV64 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-D,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-D,RV64 +; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB +; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB define @ctlz_nxv1i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv1i8: @@ -58,6 +60,12 @@ ; CHECK-D-NEXT: li a0, 8 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv1i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv1i8( %va, i1 false) ret %a } @@ -115,6 +123,12 @@ ; CHECK-D-NEXT: li a0, 8 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv2i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv2i8( %va, i1 false) ret %a } @@ -172,6 +186,12 @@ ; CHECK-D-NEXT: li a0, 8 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv4i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv4i8( %va, i1 false) ret %a } @@ -229,6 +249,12 @@ ; CHECK-D-NEXT: li a0, 8 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv8i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv8i8( %va, i1 false) ret %a } @@ -286,6 +312,12 @@ ; CHECK-D-NEXT: li a0, 8 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv16i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv16i8( %va, i1 false) ret %a } @@ -315,6 +347,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv32i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv32i8( %va, i1 false) ret %a } @@ -344,6 +382,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv64i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv64i8( %va, i1 false) ret %a } @@ -437,6 +481,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, 
v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv1i16( %va, i1 false) ret %a } @@ -530,6 +580,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv2i16( %va, i1 false) ret %a } @@ -623,6 +679,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv4i16( %va, i1 false) ret %a } @@ -716,6 +778,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv8i16( %va, i1 false) ret %a } @@ -809,6 +877,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv16i16( %va, i1 false) ret %a } @@ -880,6 +954,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv32i16( %va, i1 false) ret %a } @@ -985,6 +1065,12 @@ ; CHECK-D-NEXT: li a0, 32 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv1i32( %va, i1 false) ret %a } @@ -1090,6 +1176,12 @@ ; CHECK-D-NEXT: li a0, 32 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv2i32( %va, i1 false) ret %a } @@ -1195,6 +1287,12 @@ ; CHECK-D-NEXT: li a0, 32 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv4i32( %va, i1 false) ret %a } @@ -1300,6 +1398,12 @@ ; CHECK-D-NEXT: li a0, 32 ; CHECK-D-NEXT: vminu.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv8i32( %va, i1 false) ret %a } @@ -1403,6 +1507,12 @@ ; CHECK-D-NEXT: vminu.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv16i32( %va, i1 false) ret %a } @@ -1544,6 +1654,12 @@ ; CHECK-D-NEXT: vminu.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv1i64: +; 
CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv1i64( %va, i1 false) ret %a } @@ -1685,6 +1801,12 @@ ; CHECK-D-NEXT: vminu.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv2i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv2i64( %va, i1 false) ret %a } @@ -1826,6 +1948,12 @@ ; CHECK-D-NEXT: vminu.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv4i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv4i64( %va, i1 false) ret %a } @@ -1967,6 +2095,12 @@ ; CHECK-D-NEXT: vminu.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_nxv8i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv8i64( %va, i1 false) ret %a } @@ -2020,6 +2154,12 @@ ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv1i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv1i8( %va, i1 true) ret %a } @@ -2072,6 +2212,12 @@ ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv2i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv2i8( %va, i1 true) ret %a } @@ -2124,6 +2270,12 @@ ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv4i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv4i8( %va, i1 true) ret %a } @@ -2176,6 +2328,12 @@ ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v10, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv8i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv8i8( %va, i1 true) ret %a } @@ -2228,6 +2386,12 @@ ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v12, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv16i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv16i8( %va, i1 true) ret %a } @@ -2256,6 +2420,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv32i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv32i8( %va, i1 true) ret %a } @@ -2284,6 +2454,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv64i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call 
@llvm.ctlz.nxv64i8( %va, i1 true) ret %a } @@ -2372,6 +2548,12 @@ ; CHECK-D-NEXT: li a0, 142 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv1i16( %va, i1 true) ret %a } @@ -2460,6 +2642,12 @@ ; CHECK-D-NEXT: li a0, 142 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv2i16( %va, i1 true) ret %a } @@ -2548,6 +2736,12 @@ ; CHECK-D-NEXT: li a0, 142 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv4i16( %va, i1 true) ret %a } @@ -2636,6 +2830,12 @@ ; CHECK-D-NEXT: li a0, 142 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv8i16( %va, i1 true) ret %a } @@ -2724,6 +2924,12 @@ ; CHECK-D-NEXT: li a0, 142 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv16i16( %va, i1 true) ret %a } @@ -2794,6 +3000,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv32i16( %va, i1 true) ret %a } @@ -2894,6 +3106,12 @@ ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv1i32( %va, i1 true) ret %a } @@ -2994,6 +3212,12 @@ ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v10, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv2i32( %va, i1 true) ret %a } @@ -3094,6 +3318,12 @@ ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v12, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv4i32( %va, i1 true) ret %a } @@ -3194,6 +3424,12 @@ ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v16, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv8i32( %va, i1 true) ret %a } @@ -3292,6 +3528,12 @@ ; CHECK-D-NEXT: vrsub.vx v8, v8, a1 ; 
CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv16i32( %va, i1 true) ret %a } @@ -3428,6 +3670,12 @@ ; CHECK-D-NEXT: vrsub.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv1i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv1i64( %va, i1 true) ret %a } @@ -3564,6 +3812,12 @@ ; CHECK-D-NEXT: vrsub.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv2i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv2i64( %va, i1 true) ret %a } @@ -3700,6 +3954,12 @@ ; CHECK-D-NEXT: vrsub.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv4i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv4i64( %va, i1 true) ret %a } @@ -3836,6 +4096,12 @@ ; CHECK-D-NEXT: vrsub.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctlz_zero_undef_nxv8i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctlz.nxv8i64( %va, i1 true) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 +; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB +; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB define @ctpop_nxv1i8( %va) { ; CHECK-LABEL: ctpop_nxv1i8: @@ -19,6 +21,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv1i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv1i8( %va) ret %a } @@ -41,6 +49,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv2i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv2i8( %va) ret %a } @@ -63,6 +77,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv4i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv4i8( %va) ret %a } @@ -85,6 +105,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; 
CHECK-ZVBB-LABEL: ctpop_nxv8i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv8i8( %va) ret %a } @@ -107,6 +133,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv16i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv16i8( %va) ret %a } @@ -129,6 +161,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv32i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv32i8( %va) ret %a } @@ -151,6 +189,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv64i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv64i8( %va) ret %a } @@ -204,6 +248,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv1i16( %va) ret %a } @@ -257,6 +307,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv2i16( %va) ret %a } @@ -310,6 +366,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv4i16( %va) ret %a } @@ -363,6 +425,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv8i16( %va) ret %a } @@ -416,6 +484,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv16i16( %va) ret %a } @@ -469,6 +543,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv32i16( %va) ret %a } @@ -524,6 +604,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv1i32( %va) ret %a } @@ -579,6 +665,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 ; RV64-NEXT: ret 
+; +; CHECK-ZVBB-LABEL: ctpop_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv2i32( %va) ret %a } @@ -634,6 +726,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv4i32( %va) ret %a } @@ -689,6 +787,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv8i32( %va) ret %a } @@ -744,6 +848,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv16i32( %va) ret %a } @@ -828,6 +938,12 @@ ; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv1i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv1i64( %va) ret %a } @@ -912,6 +1028,12 @@ ; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv2i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv2i64( %va) ret %a } @@ -996,6 +1118,12 @@ ; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv4i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv4i64( %va) ret %a } @@ -1080,6 +1208,12 @@ ; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: ctpop_nxv8i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-ZVBB-NEXT: vcpop.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv8i64( %va) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll @@ -5,6 +5,8 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64f,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-F,RV64,RV64F ; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-D,RV32,RV32D ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-D,RV64,RV64D +; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB +; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB define @cttz_nxv1i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv1i8: @@ -61,6 +63,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v9, a0 ; CHECK-D-NEXT: vmerge.vim v8, v8, 8, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv1i8: +; CHECK-ZVBB: # %bb.0: +; 
CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv1i8( %va, i1 false) ret %a } @@ -121,6 +129,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v9, a0 ; CHECK-D-NEXT: vmerge.vim v8, v8, 8, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv2i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv2i8( %va, i1 false) ret %a } @@ -181,6 +195,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v9, a0 ; CHECK-D-NEXT: vmerge.vim v8, v8, 8, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv4i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv4i8( %va, i1 false) ret %a } @@ -241,6 +261,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v9, a0 ; CHECK-D-NEXT: vmerge.vim v8, v8, 8, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv8i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv8i8( %va, i1 false) ret %a } @@ -301,6 +327,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v10, a0 ; CHECK-D-NEXT: vmerge.vim v8, v8, 8, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv16i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv16i8( %va, i1 false) ret %a } @@ -327,6 +359,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv32i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv32i8( %va, i1 false) ret %a } @@ -353,6 +391,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv64i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv64i8( %va, i1 false) ret %a } @@ -442,6 +486,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vmerge.vxm v8, v9, a0, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv1i16( %va, i1 false) ret %a } @@ -531,6 +581,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vmerge.vxm v8, v9, a0, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv2i16( %va, i1 false) ret %a } @@ -620,6 +676,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vmerge.vxm v8, v9, a0, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv4i16( %va, i1 false) ret %a } @@ -709,6 +771,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vmerge.vxm v8, v10, a0, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv8i16( %va, i1 false) ret %a } @@ 
-798,6 +866,12 @@ ; CHECK-D-NEXT: li a0, 16 ; CHECK-D-NEXT: vmerge.vxm v8, v12, a0, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv16i16( %va, i1 false) ret %a } @@ -859,6 +933,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv32i16( %va, i1 false) ret %a } @@ -956,6 +1036,12 @@ ; CHECK-D-NEXT: li a0, 32 ; CHECK-D-NEXT: vmerge.vxm v8, v9, a0, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv1i32( %va, i1 false) ret %a } @@ -1053,6 +1139,12 @@ ; CHECK-D-NEXT: li a0, 32 ; CHECK-D-NEXT: vmerge.vxm v8, v9, a0, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv2i32( %va, i1 false) ret %a } @@ -1150,6 +1242,12 @@ ; CHECK-D-NEXT: li a0, 32 ; CHECK-D-NEXT: vmerge.vxm v8, v10, a0, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv4i32( %va, i1 false) ret %a } @@ -1247,6 +1345,12 @@ ; CHECK-D-NEXT: li a0, 32 ; CHECK-D-NEXT: vmerge.vxm v8, v12, a0, v0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv8i32( %va, i1 false) ret %a } @@ -1342,6 +1446,12 @@ ; CHECK-D-NEXT: vmerge.vxm v8, v16, a1, v0 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv16i32( %va, i1 false) ret %a } @@ -1506,6 +1616,12 @@ ; RV64D-NEXT: vmerge.vxm v8, v9, a1, v0 ; RV64D-NEXT: fsrm a0 ; RV64D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv1i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv1i64( %va, i1 false) ret %a } @@ -1670,6 +1786,12 @@ ; RV64D-NEXT: vmerge.vxm v8, v10, a1, v0 ; RV64D-NEXT: fsrm a0 ; RV64D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv2i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv2i64( %va, i1 false) ret %a } @@ -1834,6 +1956,12 @@ ; RV64D-NEXT: vmerge.vxm v8, v12, a1, v0 ; RV64D-NEXT: fsrm a0 ; RV64D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv4i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv4i64( %va, i1 false) ret %a } @@ -1998,6 +2126,12 @@ ; RV64D-NEXT: vmerge.vxm v8, v16, a1, v0 ; RV64D-NEXT: fsrm a0 ; RV64D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_nxv8i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli 
a0, zero, e64, m8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv8i64( %va, i1 false) ret %a } @@ -2054,6 +2188,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv1i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv1i8( %va, i1 true) ret %a } @@ -2109,6 +2249,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv2i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv2i8( %va, i1 true) ret %a } @@ -2164,6 +2310,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv4i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv4i8( %va, i1 true) ret %a } @@ -2219,6 +2371,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v10, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv8i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv8i8( %va, i1 true) ret %a } @@ -2274,6 +2432,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v12, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv16i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv16i8( %va, i1 true) ret %a } @@ -2299,6 +2463,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv32i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv32i8( %va, i1 true) ret %a } @@ -2324,6 +2494,12 @@ ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv64i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv64i8( %va, i1 true) ret %a } @@ -2406,6 +2582,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv1i16( %va, i1 true) ret %a } @@ -2488,6 +2670,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv2i16( %va, i1 true) ret %a } @@ -2570,6 +2758,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv4i16( %va, i1 
true) ret %a } @@ -2652,6 +2846,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv8i16( %va, i1 true) ret %a } @@ -2734,6 +2934,12 @@ ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv16i16( %va, i1 true) ret %a } @@ -2794,6 +3000,12 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv32i16( %va, i1 true) ret %a } @@ -2884,6 +3096,12 @@ ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv1i32( %va, i1 true) ret %a } @@ -2974,6 +3192,12 @@ ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v10, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv2i32( %va, i1 true) ret %a } @@ -3064,6 +3288,12 @@ ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v12, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv4i32( %va, i1 true) ret %a } @@ -3154,6 +3384,12 @@ ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v16, a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv8i32( %va, i1 true) ret %a } @@ -3242,6 +3478,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv16i32( %va, i1 true) ret %a } @@ -3363,6 +3605,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv1i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv1i64( %va, i1 true) ret %a } @@ -3484,6 +3732,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv2i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv2i64( %va, i1 true) ret %a } @@ -3605,6 +3859,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; 
+; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv4i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv4i64( %va, i1 true) ret %a } @@ -3726,6 +3986,12 @@ ; CHECK-D-NEXT: vsub.vx v8, v8, a1 ; CHECK-D-NEXT: fsrm a0 ; CHECK-D-NEXT: ret +; +; CHECK-ZVBB-LABEL: cttz_zero_undef_nxv8i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-ZVBB-NEXT: vctz.v v8, v8 +; CHECK-ZVBB-NEXT: ret %a = call @llvm.cttz.nxv8i64( %va, i1 true) ret %a }