diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -35,6 +35,11 @@
 def SplatPat_simm5 : ComplexPattern;
 def SplatPat_uimm5 : ComplexPattern;
 
+// A mask-vector version of the standard 'vnot' fragment but using splat_vector
+// rather than (the implicit) build_vector.
+def riscv_m_vnot : PatFrag<(ops node:$in),
+                           (xor node:$in, (splat_vector (XLenVT 1)))>;
+
 multiclass VPatUSLoadStoreSDNode;
 
 defm "" : VPatBinarySDNode_VV_VX;
 
+// 16.1. Vector Mask-Register Logical Instructions
+foreach mti = AllMasks in {
+  def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
+            (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
+             VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
+            (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
+             VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
+            (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
+             VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+
+  def : Pat<(mti.Mask (riscv_m_vnot (and VR:$rs1, VR:$rs2))),
+            (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
+             VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (riscv_m_vnot (or VR:$rs1, VR:$rs2))),
+            (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
+             VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (riscv_m_vnot (xor VR:$rs1, VR:$rs2))),
+            (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
+             VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+
+  def : Pat<(mti.Mask (and VR:$rs1, (riscv_m_vnot VR:$rs2))),
+            (!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX)
+             VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (or VR:$rs1, (riscv_m_vnot VR:$rs2))),
+            (!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX)
+             VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+}
+
 } // Predicates = [HasStdExtV]
 
 //===----------------------------------------------------------------------===//
@@ -196,6 +231,13 @@
             (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX) simm5:$rs1, VLMax, vti.SEW)>;
 }
+
+foreach mti = AllMasks in {
+  def : Pat<(mti.Mask (splat_vector (XLenVT 1))),
+            (!cast<Instruction>("PseudoVMSET_M_"#mti.BX) VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (splat_vector (XLenVT 0))),
+            (!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) VLMax, mti.SEW)>;
+}
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, IsRV32] in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll
@@ -0,0 +1,479 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i1> @vmand_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 1 x i1> %va, %vb
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmand_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 2 x i1> %va, %vb
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmand_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 4 x i1> %va, %vb
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmand_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 8 x i1> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmand_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 16 x i1> %va, %vb
+  ret <vscale x 16 x i1> %vc
+}
+
+define <vscale x 1 x i1> @vmor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 1 x i1> %va, %vb
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 2 x i1> %va, %vb
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 4 x i1> %va, %vb
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 8 x i1> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 16 x i1> %va, %vb
+  ret <vscale x 16 x i1> %vc
+}
+
+define <vscale x 1 x i1> @vmxor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 1 x i1> %va, %vb
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmxor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 2 x i1> %va, %vb
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmxor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 4 x i1> %va, %vb
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmxor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 8 x i1> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmxor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 16 x i1> %va, %vb
+  ret <vscale x 16 x i1> %vc
+}
+
+define <vscale x 1 x i1> @vmnand_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 1 x i1> %va, %vb
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vc, %splat
+  ret <vscale x 1 x i1> %not
+}
+
+define <vscale x 2 x i1> @vmnand_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 2 x i1> %va, %vb
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vc, %splat
+  ret <vscale x 2 x i1> %not
+}
+
+define <vscale x 4 x i1> @vmnand_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 4 x i1> %va, %vb
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vc, %splat
+  ret <vscale x 4 x i1> %not
+}
+
+define <vscale x 8 x i1> @vmnand_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 8 x i1> %va, %vb
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vc, %splat
+  ret <vscale x 8 x i1> %not
+}
+
+define <vscale x 16 x i1> @vmnand_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 16 x i1> %va, %vb
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vc, %splat
+  ret <vscale x 16 x i1> %not
+}
+
+define <vscale x 1 x i1> @vmnor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 1 x i1> %va, %vb
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vc, %splat
+  ret <vscale x 1 x i1> %not
+}
+
+define <vscale x 2 x i1> @vmnor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 2 x i1> %va, %vb
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vc, %splat
+  ret <vscale x 2 x i1> %not
+}
+
+define <vscale x 4 x i1> @vmnor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 4 x i1> %va, %vb
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vc, %splat
+  ret <vscale x 4 x i1> %not
+}
+
+define <vscale x 8 x i1> @vmnor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 8 x i1> %va, %vb
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vc, %splat
+  ret <vscale x 8 x i1> %not
+}
+
+define <vscale x 16 x i1> @vmnor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 16 x i1> %va, %vb
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vc, %splat
+  ret <vscale x 16 x i1> %not
+}
+
+define <vscale x 1 x i1> @vmxnor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 1 x i1> %va, %vb
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vc, %splat
+  ret <vscale x 1 x i1> %not
+}
+
+define <vscale x 2 x i1> @vmxnor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 2 x i1> %va, %vb
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vc, %splat
+  ret <vscale x 2 x i1> %not
+}
+
+define <vscale x 4 x i1> @vmxnor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 4 x i1> %va, %vb
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vc, %splat
+  ret <vscale x 4 x i1> %not
+}
+
+define <vscale x 8 x i1> @vmxnor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 8 x i1> %va, %vb
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vc, %splat
+  ret <vscale x 8 x i1> %not
+}
+
+define <vscale x 16 x i1> @vmxnor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 16 x i1> %va, %vb
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vc, %splat
+  ret <vscale x 16 x i1> %not
+}
+
+define <vscale x 1 x i1> @vmandnot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vb, %splat
+  %vc = and <vscale x 1 x i1> %va, %not
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmandnot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vb, %splat
+  %vc = and <vscale x 2 x i1> %va, %not
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmandnot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vb, %splat
+  %vc = and <vscale x 4 x i1> %va, %not
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmandnot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vb, %splat
+  %vc = and <vscale x 8 x i1> %va, %not
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmandnot_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vb, %splat
+  %vc = and <vscale x 16 x i1> %va, %not
+  ret <vscale x 16 x i1> %vc
+}
+
+define <vscale x 1 x i1> @vmornot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vb, %splat
+  %vc = or <vscale x 1 x i1> %va, %not
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmornot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vb, %splat
+  %vc = or <vscale x 2 x i1> %va, %not
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmornot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vb, %splat
+  %vc = or <vscale x 4 x i1> %va, %not
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmornot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vb, %splat
+  %vc = or <vscale x 8 x i1> %va, %not
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmornot_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vb, %splat
+  %vc = or <vscale x 16 x i1> %va, %not
+  ret <vscale x 16 x i1> %vc
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
@@ -0,0 +1,113 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i1> @vsplat_nxv1i1_0() {
+; CHECK-LABEL: vsplat_nxv1i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i1> %splat
+}
+
+define <vscale x 1 x i1> @vsplat_nxv1i1_1() {
+; CHECK-LABEL: vsplat_nxv1i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i1> %splat
+}
+
+define <vscale x 2 x i1> @vsplat_nxv2i1_0() {
+; CHECK-LABEL: vsplat_nxv2i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i1> %splat
+}
+
+define <vscale x 2 x i1> @vsplat_nxv2i1_1() {
+; CHECK-LABEL: vsplat_nxv2i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i1> %splat
+}
+
+define <vscale x 4 x i1> @vsplat_nxv4i1_0() {
+; CHECK-LABEL: vsplat_nxv4i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i1> %splat
+}
+
+define <vscale x 4 x i1> @vsplat_nxv4i1_1() {
+; CHECK-LABEL: vsplat_nxv4i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i1> %splat
+}
+
+define <vscale x 8 x i1> @vsplat_nxv8i1_0() {
+; CHECK-LABEL: vsplat_nxv8i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i1> %splat
+}
+
+define <vscale x 8 x i1> @vsplat_nxv8i1_1() {
+; CHECK-LABEL: vsplat_nxv8i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i1> %splat
+}
+
+define <vscale x 16 x i1> @vsplat_nxv16i1_0() {
+; CHECK-LABEL: vsplat_nxv16i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i1> %splat
+}
+
+define <vscale x 16 x i1> @vsplat_nxv16i1_1() {
+; CHECK-LABEL: vsplat_nxv16i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i1> %splat
+}