diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -274,6 +274,45 @@
 }
 }
 
+foreach vti = AllIntegerVectors in {
+  let Predicates = !listconcat([HasStdExtZvbb],
+                               GetVTypePredicates<vti>.Predicates) in {
+    def : Pat<(vti.Vector (riscv_and_vl (riscv_xor_vl
+                                           (vti.Vector vti.RegClass:$rs1),
+                                           (riscv_splat_vector -1),
+                                           (vti.Vector vti.RegClass:$merge),
+                                           (vti.Mask V0),
+                                           VLOpFrag),
+                                        (vti.Vector vti.RegClass:$rs2),
+                                        (vti.Vector vti.RegClass:$merge),
+                                        (vti.Mask V0),
+                                        VLOpFrag)),
+              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
+                 vti.RegClass:$merge,
+                 vti.RegClass:$rs2,
+                 vti.RegClass:$rs1,
+                 (vti.Mask V0),
+                 GPR:$vl,
+                 vti.Log2SEW,
+                 TAIL_AGNOSTIC)>;
+
+    def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector
+                                           (not vti.ScalarRegClass:$rs1)),
+                                        (vti.Vector vti.RegClass:$rs2),
+                                        (vti.Vector vti.RegClass:$merge),
+                                        (vti.Mask V0),
+                                        VLOpFrag)),
+              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
+                 vti.RegClass:$merge,
+                 vti.RegClass:$rs2,
+                 vti.ScalarRegClass:$rs1,
+                 (vti.Mask V0),
+                 GPR:$vl,
+                 vti.Log2SEW,
+                 TAIL_AGNOSTIC)>;
+  }
+}
+
 defm : VPatUnaryVL_V;
 defm : VPatUnaryVL_V;
 defm : VPatUnaryVL_V;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
@@ -0,0 +1,1432 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB64
+
+declare <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vandn_vv_vp_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> shufflevector(<vscale x 1 x i8> insertelement(<vscale x 1 x i8> poison, i8 -1, i32 0), <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %not.a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vandn_vv_vp_swapped_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> shufflevector(<vscale x 1 x i8> insertelement(<vscale x 1 x i8> poison, i8 -1, i32 0), <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i8> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vandn_vx_vp_nxv1i8(i8 %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 1 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 1 x i8> %head.not.a, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i8> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i8> %x
+}
+
+declare <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vandn_vv_vp_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> shufflevector(<vscale x 2 x i8> insertelement(<vscale x 2 x i8> poison, i8 -1, i32 0), <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %not.a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i8> %x
+}
+
+define <vscale x 2 x i8> @vandn_vv_vp_swapped_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> shufflevector(<vscale x 2 x i8> insertelement(<vscale x 2 x i8> poison, i8 -1, i32 0), <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %b, <vscale x 2 x i8> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i8> %x
+}
+
+define <vscale x 2 x i8> @vandn_vx_vp_nxv2i8(i8 %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 2 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 2 x i8> %head.not.a, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %b, <vscale x 2 x i8> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i8> %x
+}
+
+declare <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vandn_vv_vp_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> shufflevector(<vscale x 4 x i8> insertelement(<vscale x 4 x i8> poison, i8 -1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %not.a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i8> %x
+}
+
+define <vscale x 4 x i8> @vandn_vv_vp_swapped_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> shufflevector(<vscale x 4 x i8> insertelement(<vscale x 4 x i8> poison, i8 -1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %b, <vscale x 4 x i8> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i8> %x
+}
+
+define <vscale x 4 x i8> @vandn_vx_vp_nxv4i8(i8 %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 4 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 4 x i8> %head.not.a, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %b, <vscale x 4 x i8> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i8> %x
+}
+
+declare <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vandn_vv_vp_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> shufflevector(<vscale x 8 x i8> insertelement(<vscale x 8 x i8> poison, i8 -1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %not.a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i8> %x
+}
+
+define <vscale x 8 x i8> @vandn_vv_vp_swapped_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> shufflevector(<vscale x 8 x i8> insertelement(<vscale x 8 x i8> poison, i8 -1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %b, <vscale x 8 x i8> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i8> %x
+}
+
+define <vscale x 8 x i8> @vandn_vx_vp_nxv8i8(i8 %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 8 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 8 x i8> %head.not.a, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %b, <vscale x 8 x i8> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i8> %x
+}
+
+declare <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+declare <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vandn_vv_vp_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> shufflevector(<vscale x 16 x i8> insertelement(<vscale x 16 x i8> poison, i8 -1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %not.a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i8> %x
+}
+
+define <vscale x 16 x i8> @vandn_vv_vp_swapped_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> shufflevector(<vscale x 16 x i8> insertelement(<vscale x 16 x i8> poison, i8 -1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %b, <vscale x 16 x i8> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i8> %x
+}
+
+define <vscale x 16 x i8> @vandn_vx_vp_nxv16i8(i8 %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 16 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 16 x i8> %head.not.a, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %b, <vscale x 16 x i8> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i8> %x
+}
+
+declare <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+declare <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vandn_vv_vp_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> shufflevector(<vscale x 32 x i8> insertelement(<vscale x 32 x i8> poison, i8 -1, i32 0), <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer), <vscale x 32 x i1> %mask, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %not.a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i8> %x
+}
+
+define <vscale x 32 x i8> @vandn_vv_vp_swapped_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> shufflevector(<vscale x 32 x i8> insertelement(<vscale x 32 x i8> poison, i8 -1, i32 0), <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer), <vscale x 32 x i1> %mask, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %b, <vscale x 32 x i8> %not.a, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i8> %x
+}
+
+define <vscale x 32 x i8> @vandn_vx_vp_nxv32i8(i8 %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 32 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 32 x i8> %head.not.a, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %b, <vscale x 32 x i8> %splat.not.a, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i8> %x
+}
+
+declare <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+declare <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vandn_vv_vp_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> shufflevector(<vscale x 64 x i8> insertelement(<vscale x 64 x i8> poison, i8 -1, i32 0), <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer), <vscale x 64 x i1> %mask, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %not.a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 %evl)
+  ret <vscale x 64 x i8> %x
+}
+
+define <vscale x 64 x i8> @vandn_vv_vp_swapped_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> shufflevector(<vscale x 64 x i8> insertelement(<vscale x 64 x i8> poison, i8 -1, i32 0), <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer), <vscale x 64 x i1> %mask, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %b, <vscale x 64 x i8> %not.a, <vscale x 64 x i1> %mask, i32 %evl)
+  ret <vscale x 64 x i8> %x
+}
+
+define <vscale x 64 x i8> @vandn_vx_vp_nxv64i8(i8 %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 64 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 64 x i8> %head.not.a, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %b, <vscale x 64 x i8> %splat.not.a, <vscale x 64 x i1> %mask, i32 %evl)
+  ret <vscale x 64 x i8> %x
+}
+
+declare <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vandn_vv_vp_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> shufflevector(<vscale x 1 x i16> insertelement(<vscale x 1 x i16> poison, i16 -1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %not.a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i16> %x
+}
+
+define <vscale x 1 x i16> @vandn_vv_vp_swapped_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> shufflevector(<vscale x 1 x i16> insertelement(<vscale x 1 x i16> poison, i16 -1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i16> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i16> %x
+}
+
+define <vscale x 1 x i16> @vandn_vx_vp_nxv1i16(i16 %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 1 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 1 x i16> %head.not.a, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i16> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i16> %x
+}
+
+declare <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vandn_vv_vp_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 -1, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %not.a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i16> %x
+}
+
+define <vscale x 2 x i16> @vandn_vv_vp_swapped_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 -1, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %b, <vscale x 2 x i16> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i16> %x
+}
+
+define <vscale x 2 x i16> @vandn_vx_vp_nxv2i16(i16 %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 2 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 2 x i16> %head.not.a, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %b, <vscale x 2 x i16> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i16> %x
+}
+
+declare <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vandn_vv_vp_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> shufflevector(<vscale x 4 x i16> insertelement(<vscale x 4 x i16> poison, i16 -1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %not.a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i16> %x
+}
+
+define <vscale x 4 x i16> @vandn_vv_vp_swapped_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> shufflevector(<vscale x 4 x i16> insertelement(<vscale x 4 x i16> poison, i16 -1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %b, <vscale x 4 x i16> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i16> %x
+}
+
+define <vscale x 4 x i16> @vandn_vx_vp_nxv4i16(i16 %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 4 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 4 x i16> %head.not.a, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %b, <vscale x 4 x i16> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i16> %x
+}
+
+declare <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vandn_vv_vp_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> shufflevector(<vscale x 8 x i16> insertelement(<vscale x 8 x i16> poison, i16 -1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %not.a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i16> %x
+}
+
+define <vscale x 8 x i16> @vandn_vv_vp_swapped_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> shufflevector(<vscale x 8 x i16> insertelement(<vscale x 8 x i16> poison, i16 -1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %b, <vscale x 8 x i16> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i16> %x
+}
+
+define <vscale x 8 x i16> @vandn_vx_vp_nxv8i16(i16 %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 8 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 8 x i16> %head.not.a, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %b, <vscale x 8 x i16> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i16> %x
+}
+
+declare <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+declare <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vandn_vv_vp_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> shufflevector(<vscale x 16 x i16> insertelement(<vscale x 16 x i16> poison, i16 -1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %not.a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i16> %x
+}
+
+define <vscale x 16 x i16> @vandn_vv_vp_swapped_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> shufflevector(<vscale x 16 x i16> insertelement(<vscale x 16 x i16> poison, i16 -1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %b, <vscale x 16 x i16> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i16> %x
+}
+
+define <vscale x 16 x i16> @vandn_vx_vp_nxv16i16(i16 %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 16 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 16 x i16> %head.not.a, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %b, <vscale x 16 x i16> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i16> %x
+}
+
+declare <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+declare <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vandn_vv_vp_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> shufflevector(<vscale x 32 x i16> insertelement(<vscale x 32 x i16> poison, i16 -1, i32 0), <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer), <vscale x 32 x i1> %mask, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %not.a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i16> %x
+}
+
+define <vscale x 32 x i16> @vandn_vv_vp_swapped_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> shufflevector(<vscale x 32 x i16> insertelement(<vscale x 32 x i16> poison, i16 -1, i32 0), <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer), <vscale x 32 x i1> %mask, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %b, <vscale x 32 x i16> %not.a, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i16> %x
+}
+
+define <vscale x 32 x i16> @vandn_vx_vp_nxv32i16(i16 %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 32 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 32 x i16> %head.not.a, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %b, <vscale x 32 x i16> %splat.not.a, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i16> %x
+}
+
+declare <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vandn_vv_vp_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> shufflevector(<vscale x 1 x i32> insertelement(<vscale x 1 x i32> poison, i32 -1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %not.a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i32> %x
+}
+
+define <vscale x 1 x i32> @vandn_vv_vp_swapped_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> shufflevector(<vscale x 1 x i32> insertelement(<vscale x 1 x i32> poison, i32 -1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i32> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i32> %x
+}
+
+define <vscale x 1 x i32> @vandn_vx_vp_nxv1i32(i32 %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 1 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 1 x i32> %head.not.a, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i32> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i32> %x
+}
+
+declare <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vandn_vv_vp_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> shufflevector(<vscale x 2 x i32> insertelement(<vscale x 2 x i32> poison, i32 -1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %not.a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i32> %x
+}
+
+define <vscale x 2 x i32> @vandn_vv_vp_swapped_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> shufflevector(<vscale x 2 x i32> insertelement(<vscale x 2 x i32> poison, i32 -1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %b, <vscale x 2 x i32> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i32> %x
+}
+
+define <vscale x 2 x i32> @vandn_vx_vp_nxv2i32(i32 %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 2 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 2 x i32> %head.not.a, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %b, <vscale x 2 x i32> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i32> %x
+}
+
+declare <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vandn_vv_vp_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> shufflevector(<vscale x 4 x i32> insertelement(<vscale x 4 x i32> poison, i32 -1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %not.a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i32> %x
+}
+
+define <vscale x 4 x i32> @vandn_vv_vp_swapped_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> shufflevector(<vscale x 4 x i32> insertelement(<vscale x 4 x i32> poison, i32 -1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %b, <vscale x 4 x i32> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i32> %x
+}
+
+define <vscale x 4 x i32> @vandn_vx_vp_nxv4i32(i32 %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 4 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 4 x i32> %head.not.a, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %b, <vscale x 4 x i32> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i32> %x
+}
+
+declare <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vandn_vv_vp_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> shufflevector(<vscale x 8 x i32> insertelement(<vscale x 8 x i32> poison, i32 -1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %not.a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i32> %x
+}
+
+define <vscale x 8 x i32> @vandn_vv_vp_swapped_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> shufflevector(<vscale x 8 x i32> insertelement(<vscale x 8 x i32> poison, i32 -1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %b, <vscale x 8 x i32> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i32> %x
+}
+
+define <vscale x 8 x i32> @vandn_vx_vp_nxv8i32(i32 %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 8 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 8 x i32> %head.not.a, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %b, <vscale x 8 x i32> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i32> %x
+}
+
+declare <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+declare <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vandn_vv_vp_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> shufflevector(<vscale x 16 x i32> insertelement(<vscale x 16 x i32> poison, i32 -1, i32 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %not.a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i32> %x
+}
+
+define <vscale x 16 x i32> @vandn_vv_vp_swapped_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> shufflevector(<vscale x 16 x i32> insertelement(<vscale x 16 x i32> poison, i32 -1, i32 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %b, <vscale x 16 x i32> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i32> %x
+}
+
+define <vscale x 16 x i32> @vandn_vx_vp_nxv16i32(i32 %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 16 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 16 x i32> %head.not.a, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %b, <vscale x 16 x i32> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i32> %x
+}
+
+declare <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vandn_vv_vp_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 -1, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %not.a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %x
+}
+
+define <vscale x 1 x i64> @vandn_vv_vp_swapped_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 -1, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %b, <vscale x 1 x i64> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %x
+}
+
+define <vscale x 1 x i64> @vandn_vx_vp_nxv1i64(i64 %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: vandn_vx_vp_nxv1i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    not a0, a0
+; CHECK-RV32-NEXT:    not a1, a1
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vandn_vx_vp_nxv1i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    not a0, a0
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vandn_vx_vp_nxv1i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    not a0, a0
+; CHECK-ZVBB32-NEXT:    not a1, a1
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-ZVBB32-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vandn_vx_vp_nxv1i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-ZVBB64-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB64-NEXT:    ret
+  %not.a = xor i64 %a, -1
+  %head.not.a = insertelement <vscale x 1 x i64> poison, i64 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 1 x i64> %head.not.a, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %b, <vscale x 1 x i64> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %x
+}
+
+declare <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vandn_vv_vp_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 -1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %not.a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i64> %x
+}
+
+define <vscale x 2 x i64> @vandn_vv_vp_swapped_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 -1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %b, <vscale x 2 x i64> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i64> %x
+}
+
+define <vscale x 2 x i64> @vandn_vx_vp_nxv2i64(i64 %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: vandn_vx_vp_nxv2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    not a0, a0
+; CHECK-RV32-NEXT:    not a1, a1
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-RV32-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vandn_vx_vp_nxv2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    not a0, a0
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vandn_vx_vp_nxv2i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    not a0, a0
+; CHECK-ZVBB32-NEXT:    not a1, a1
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-ZVBB32-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vandn_vx_vp_nxv2i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-ZVBB64-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB64-NEXT:    ret
+  %not.a = xor i64 %a, -1
+  %head.not.a = insertelement <vscale x 2 x i64> poison, i64 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 2 x i64> %head.not.a, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %b, <vscale x 2 x i64> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i64> %x
+}
+
+declare <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vandn_vv_vp_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> shufflevector(<vscale x 4 x i64> insertelement(<vscale x 4 x i64> poison, i64 -1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %not.a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i64> %x
+}
+
+define <vscale x 4 x i64> @vandn_vv_vp_swapped_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> shufflevector(<vscale x 4 x i64> insertelement(<vscale x 4 x i64> poison, i64 -1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %b, <vscale x 4 x i64> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i64> %x
+}
+
+define <vscale x 4 x i64> @vandn_vx_vp_nxv4i64(i64 %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: vandn_vx_vp_nxv4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    not a0, a0
+; CHECK-RV32-NEXT:    not a1, a1
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-RV32-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vandn_vx_vp_nxv4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    not a0, a0
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vandn_vx_vp_nxv4i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    not a0, a0
+; CHECK-ZVBB32-NEXT:    not a1, a1
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-ZVBB32-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vandn_vx_vp_nxv4i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-ZVBB64-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB64-NEXT:    ret
+  %not.a = xor i64 %a, -1
+  %head.not.a = insertelement <vscale x 4 x i64> poison, i64 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 4 x i64> %head.not.a, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %b, <vscale x 4 x i64> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i64> %x
+}
+
+declare <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vandn_vv_vp_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> shufflevector(<vscale x 8 x i64> insertelement(<vscale x 8 x i64> poison, i64 -1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %not.a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i64> %x
+}
+
+define <vscale x 8 x i64> @vandn_vv_vp_swapped_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> shufflevector(<vscale x 8 x i64> insertelement(<vscale x 8 x i64> poison, i64 -1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %b, <vscale x 8 x i64> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i64> %x
+}
+
+define <vscale x 8 x i64> @vandn_vx_vp_nxv8i64(i64 %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: vandn_vx_vp_nxv8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    not a0, a0
+; CHECK-RV32-NEXT:    not a1, a1
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vandn_vx_vp_nxv8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    not a0, a0
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vandn_vx_vp_nxv8i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    not a0, a0
+; CHECK-ZVBB32-NEXT:    not a1, a1
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-ZVBB32-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vandn_vx_vp_nxv8i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-ZVBB64-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB64-NEXT:    ret
+  %not.a = xor i64 %a, -1
+  %head.not.a = insertelement <vscale x 8 x i64> poison, i64 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 8 x i64> %head.not.a, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %b, <vscale x 8 x i64> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i64> %x
+}