diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -96,6 +96,19 @@ xop_kind:$rs2, VLMax, sew)>; +multiclass VPatBinarySDNode_VV_VX +{ + foreach vti = AllIntegerVectors in { + def : VPatBinarySDNode_VV; + def : VPatBinarySDNode_XI; + } +} + multiclass VPatBinarySDNode_VV_VX_VI { @@ -127,13 +140,29 @@ // 12.1. Vector Single-Width Integer Add and Subtract defm "" : VPatBinarySDNode_VV_VX_VI; +defm "" : VPatBinarySDNode_VV_VX; +// Handle VRSUB specially since it's the only integer binary op with reversed +// pattern operands +foreach vti = AllIntegerVectors in { + def : Pat<(sub (vti.Vector (SplatPat XLenVT:$rs2)), + (vti.Vector vti.RegClass:$rs1)), + (!cast("PseudoVRSUB_VX_"# vti.LMul.MX) + vti.RegClass:$rs1, GPR:$rs2, VLMax, vti.SEW)>; + def : Pat<(sub (vti.Vector (SplatPat_simm5 XLenVT:$rs2)), + (vti.Vector vti.RegClass:$rs1)), + (!cast("PseudoVRSUB_VI_"# vti.LMul.MX) + vti.RegClass:$rs1, simm5:$rs2, VLMax, vti.SEW)>; +} // 12.5. Vector Bitwise Logical Instructions +defm "" : VPatBinarySDNode_VV_VX_VI; defm "" : VPatBinarySDNode_VV_VX_VI; +defm "" : VPatBinarySDNode_VV_VX_VI; // 12.6. 
Vector Single-Width Bit Shift Instructions defm "" : VPatBinarySDNode_VV_VX_VI; defm "" : VPatBinarySDNode_VV_VX_VI; +defm "" : VPatBinarySDNode_VV_VX_VI; } // Predicates = [HasStdExtV] diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode-rv32.ll @@ -0,0 +1,1333 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vand_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv1i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; 
CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv2i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv4i8( %va, i8 signext %b) 
{ +; CHECK-LABEL: vand_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv4i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; 
CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv16i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv32i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = 
and %va, %splat + ret %vc +} + +define @vand_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv64i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv64i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv64i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, 
zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv1i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret 
+ %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv2i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, 
undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv4i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + 
ret %vc +} + +define @vand_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv16i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli 
a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv32i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vand_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + 
%head = insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv1i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vand_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv2i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vand_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv4i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + 
+define @vand_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vand_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vand_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: 
vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv16i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vand_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vand.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret 
%vc +} + +define @vand_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vand_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i64_1( %va) { +; CHECK-LABEL: vand_vi_nxv1i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i64_2( %va) { +; CHECK-LABEL: vand_vi_nxv1i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vand_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vand.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vand_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 
-10 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i64_1( %va) { +; CHECK-LABEL: vand_vi_nxv2i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i64_2( %va) { +; CHECK-LABEL: vand_vi_nxv2i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vand_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vand.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vand_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define 
@vand_vi_nxv4i64_1( %va) { +; CHECK-LABEL: vand_vi_nxv4i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i64_2( %va) { +; CHECK-LABEL: vand_vi_nxv4i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vand_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vand_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i64_1( %va) { +; CHECK-LABEL: vand_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vand.vi 
v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i64_2( %va) { +; CHECK-LABEL: vand_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode-rv64.ll @@ -0,0 +1,1305 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vand_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: 
ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv1i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv2i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = 
shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv4i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = 
insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv16i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc 
= and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv32i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vand_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vand_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv64i8_1( %va) { +; CHECK-LABEL: vand_vi_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv64i8_2( %va) { +; CHECK-LABEL: vand_vi_nxv64i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} 
+ +define @vand_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv1i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vx 
v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv2i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 
+ %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv4i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + 
ret %vc +} + +define @vand_vi_nxv8i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv16i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define 
@vand_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vand_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vand_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i16_1( %va) { +; CHECK-LABEL: vand_vi_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv32i16_2( %va) { +; CHECK-LABEL: vand_vi_nxv32i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vand_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu 
+; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv1i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vand_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = 
insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv2i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vand_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv4i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vand_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret 
%vc +} + +define @vand_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vand_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vand_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i32_1( %va) { +; CHECK-LABEL: vand_vi_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv16i32_2( %va) { +; CHECK-LABEL: vand_vi_nxv16i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vand_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, 
e64,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vand_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i64_1( %va) { +; CHECK-LABEL: vand_vi_nxv1i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv1i64_2( %va) { +; CHECK-LABEL: vand_vi_nxv1i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vand_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vand_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = 
insertelement undef, i64 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i64_1( %va) { +; CHECK-LABEL: vand_vi_nxv2i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv2i64_2( %va) { +; CHECK-LABEL: vand_vi_nxv2i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vand.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vand_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vand_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i64_1( %va) { +; CHECK-LABEL: vand_vi_nxv4i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer 
+ %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv4i64_2( %va) { +; CHECK-LABEL: vand_vi_nxv4i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vand_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = and %va, %vb + ret %vc +} + +define @vand_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vand_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vand_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, -10 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -10, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i64_1( %va) { +; CHECK-LABEL: vand_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vand.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + ret %vc +} + +define @vand_vi_nxv8i64_2( %va) { +; CHECK-LABEL: vand_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = and %va, %splat + 
ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode-rv32.ll @@ -0,0 +1,559 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vrsub_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, 
i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = 
insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, 
a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vrsub_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vrsub_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; 
CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vrsub_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vrsub_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vrsub_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, 
e32,m8,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vrsub_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vsub.vv v16, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vrsub_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vsub.vv v16, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub 
%splat, %va + ret %vc +} + +define @vrsub_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vrsub_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vsub.vv v16, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vrsub_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vsub.vv v16, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, 
-4 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode-rv64.ll @@ -0,0 +1,531 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vrsub_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv4i8: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv32i8: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: 
vrsub_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv16i16( %va, 
i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc 
+} + +define @vrsub_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc 
= sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vrsub_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vrsub_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vrsub_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -4, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vrsub_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vrsub_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + +define @vrsub_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vrsub_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vrsub.vi v16, v16, -4 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -4, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %splat, %va + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode-rv32.ll @@ -0,0 +1,1069 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vsra_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + 
ret %vc +} + +define @vsra_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv2i8_1: 
+; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, 
i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv32i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsra.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv64i8_0: +; CHECK: # %bb.0: 
+; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv64i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, 
e16,mf2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv16i16: 
+; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsra.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv32i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv32i16_1: +; CHECK: # %bb.0: +; 
CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vsra_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vsra_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 
+ %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vsra_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vsra_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsra.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vsra_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv16i32_0: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vsra_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vsra.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vsra_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i64_1( %va) { +; CHECK-LABEL: vsra_vi_nxv1i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, 
i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vsra_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vsra.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vsra_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i64_1( %va) { +; CHECK-LABEL: vsra_vi_nxv2i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vsra_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x 
v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vsra.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vsra_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i64_1( %va) { +; CHECK-LABEL: vsra_vi_nxv4i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsra.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vsra_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vsra.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vsra_vi_nxv8i64_0: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i64_1( %va) { +; CHECK-LABEL: vsra_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode-rv64.ll @@ -0,0 +1,1041 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vsra_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv1i8_1: +; CHECK: # %bb.0: +; 
CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = 
ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; 
CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv32i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsra.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vsra_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vsra_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv64i8_1( %va) { +; CHECK-LABEL: vsra_vi_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsra.vv 
v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + 
ret %vc +} + +define @vsra_vi_nxv2i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, 
zero, e16,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: 
vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsra.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vsra_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vsra_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv32i16_1( %va) { +; CHECK-LABEL: vsra_vi_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vsra_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vsra_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat 
+ ret %vc +} + +define @vsra_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vsra_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vsra_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: 
vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsra.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vsra_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vsra_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv16i32_1( %va) { +; CHECK-LABEL: vsra_vi_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: 
vsra.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vsra_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vsra_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv1i64_1( %va) { +; CHECK-LABEL: vsra_vi_nxv1i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vsra_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vsra_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + 
+define @vsra_vi_nxv2i64_1( %va) { +; CHECK-LABEL: vsra_vi_nxv2i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vsra.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vsra_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vsra_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv4i64_1( %va) { +; CHECK-LABEL: vsra_vi_nxv4i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vsra_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsra.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = ashr %va, %vb + ret %vc +} + +define @vsra_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vsra_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, 
zero, e64,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vsra_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vsra.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + +define @vsra_vi_nxv8i64_1( %va) { +; CHECK-LABEL: vsra_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vsra.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = ashr %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode-rv32.ll @@ -0,0 +1,805 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vsub_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv1i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: 
vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv2i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector 
%head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv32i8: +; CHECK: # %bb.0: 
+; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsub.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb 
+ ret %vc +} + +define @vsub_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv1i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv2i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv4i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv8i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv16i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsub.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv32i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vsub_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define 
@vsub_vx_nxv1i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vsub_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv2i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vsub_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv4i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, 
e32,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vsub_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv8i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsub.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vsub_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv16i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat 
= shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vsub_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vsub.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv1i64_0( %va) { +; CHECK-LABEL: vsub_vx_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vsub_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vsub.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv2i64_0( %va) { +; CHECK-LABEL: vsub_vx_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vsub_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vsub.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv4i64_0( %va) { +; CHECK-LABEL: vsub_vx_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsub.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vsub_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 
+; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vsub.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv8i64_0( %va) { +; CHECK-LABEL: vsub_vx_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode-rv64.ll @@ -0,0 +1,777 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vsub_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv1i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = 
shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv2i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv8i8: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + 
+define @vsub_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsub.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vsub_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vsub_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv1i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv2i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv4i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv8i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv16i16_0( %va) { +; 
CHECK-LABEL: vsub_vx_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsub.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vsub_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv32i16_0( %va) { +; CHECK-LABEL: vsub_vx_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vsub_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv1i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: 
vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vsub_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv2i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vsub_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv4i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat = 
shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vsub_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv8i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsub.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vsub_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv16i32_0( %va) { +; CHECK-LABEL: vsub_vx_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define 
@vsub_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vsub_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv1i64_0( %va) { +; CHECK-LABEL: vsub_vx_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vsub.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vsub_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv2i64_0( %va) { +; CHECK-LABEL: vsub_vx_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vsub.vv v16, 
v16, v20 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vsub_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv4i64_0( %va) { +; CHECK-LABEL: vsub_vx_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vsub_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsub.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sub %va, %vb + ret %vc +} + +define @vsub_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vsub_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + +define @vsub_vx_nxv8i64_0( %va) { +; CHECK-LABEL: vsub_vx_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vsub.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sub %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode-rv32.ll @@ -0,0 +1,1333 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vxor_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv1i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: 
vxor_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv2i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: 
vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv4i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = 
shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv16i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, 
%splat + ret %vc +} + +define @vxor_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv32i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vxor.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, 
e8,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv64i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv64i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv64i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = 
insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv1i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv2i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv4i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc 
+} + +define @vxor_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, 
v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv16i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vxor.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = 
insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv32i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vxor_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vxor_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i32_1( %va) { +; CHECK-LABEL: vxor_vi_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i32_2( %va) { +; CHECK-LABEL: vxor_vi_nxv1i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vxor_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vxor_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i32_1( %va) { +; CHECK-LABEL: vxor_vi_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i32_2( %va) { +; CHECK-LABEL: vxor_vi_nxv2i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + 
+define @vxor_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vxor_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vxor_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i32_1( %va) { +; CHECK-LABEL: vxor_vi_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i32_2( %va) { +; CHECK-LABEL: vxor_vi_nxv4i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vxor_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: 
ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vxor_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i32_1( %va) { +; CHECK-LABEL: vxor_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i32_2( %va) { +; CHECK-LABEL: vxor_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vxor.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vxor_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vxor_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i32_1( %va) { +; CHECK-LABEL: vxor_vi_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i32_2( %va) { +; CHECK-LABEL: vxor_vi_nxv16i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vxor_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vxor.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vxor_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i64_1( %va) { +; CHECK-LABEL: vxor_vi_nxv1i64_1: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i64_2( %va) { +; CHECK-LABEL: vxor_vi_nxv1i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vxor_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vxor.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vxor_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i64_1( %va) { +; CHECK-LABEL: vxor_vi_nxv2i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector 
%head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i64_2( %va) { +; CHECK-LABEL: vxor_vi_nxv2i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vxor_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vxor.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vxor_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i64_1( %va) { +; CHECK-LABEL: vxor_vi_nxv4i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i64_2( %va) { +; CHECK-LABEL: vxor_vi_nxv4i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, 
zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vxor.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vxor_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vxor.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vxor_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i64_1( %va) { +; CHECK-LABEL: vxor_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i64_2( %va) { +; CHECK-LABEL: vxor_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 
+ %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode-rv64.ll @@ -0,0 +1,1305 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vxor_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv1i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, 
undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv2i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, 
e8,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv4i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 
-1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} 
+ +define @vxor_vi_nxv16i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv16i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv32i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: 
vxor_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vxor.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vxor_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vxor_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv64i8_1( %va) { +; CHECK-LABEL: vxor_vi_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i8 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv64i8_2( %va) { +; CHECK-LABEL: vxor_vi_nxv64i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = 
insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv1i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv2i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv4i16_2( 
%va) { +; CHECK-LABEL: vxor_vi_nxv4i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv8i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv16i16: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv16i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv16i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vxor.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vxor_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = 
insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vxor_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i16_1( %va) { +; CHECK-LABEL: vxor_vi_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i16 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv32i16_2( %va) { +; CHECK-LABEL: vxor_vi_nxv32i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vxor_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vxor_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i32_1( %va) { +; CHECK-LABEL: vxor_vi_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv1i32_2( %va) { +; CHECK-LABEL: vxor_vi_nxv1i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vxor_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vxor.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = xor %va, %vb + ret %vc +} + +define @vxor_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vxor_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vxor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vxor_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i32_1( %va) { +; CHECK-LABEL: vxor_vi_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vxor.vi v16, v16, 8 +; CHECK-NEXT: ret + %head = insertelement undef, i32 8, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = xor %va, %splat + ret %vc +} + +define @vxor_vi_nxv2i32_2( 
<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv2i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = xor <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vxor_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
+; CHECK-LABEL: vxor_vv_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vxor.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 4 x i32> %va, %vb
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vxor_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vxor_vx_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = xor <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vxor_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv4i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = xor <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vxor_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv4i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, 8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 8, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = xor <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vxor_vi_nxv4i32_2(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv4i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = xor <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vxor_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: vxor_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vxor.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vxor_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vxor_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = xor <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vxor_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = xor <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vxor_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, 8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 8, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = xor <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vxor_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv8i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = xor <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vxor_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
+; CHECK-LABEL: vxor_vv_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vxor.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 16 x i32> %va, %vb
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vxor_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vxor_vx_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = xor <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vxor_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv16i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = xor <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vxor_vi_nxv16i32_1(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv16i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, 8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 8, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = xor <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vxor_vi_nxv16i32_2(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vxor_vi_nxv16i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = xor <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 1 x i64> @vxor_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
+; CHECK-LABEL: vxor_vv_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vxor.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 1 x i64> %va, %vb
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vxor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vxor_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = xor <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vxor_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv1i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = xor <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vxor_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv1i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, 8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 8, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = xor <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vxor_vi_nxv1i64_2(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv1i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = xor <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vxor_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
+; CHECK-LABEL: vxor_vv_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vxor.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 2 x i64> %va, %vb
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vxor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vxor_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = xor <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vxor_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv2i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = xor <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vxor_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv2i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, 8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 8, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = xor <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vxor_vi_nxv2i64_2(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv2i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = xor <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vxor_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
+; CHECK-LABEL: vxor_vv_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vxor.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 4 x i64> %va, %vb
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vxor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vxor_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = xor <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vxor_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv4i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = xor <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vxor_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv4i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, 8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 8, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = xor <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vxor_vi_nxv4i64_2(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv4i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = xor <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vxor_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: vxor_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vxor.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vxor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vxor_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = xor <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vxor_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = xor <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vxor_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vxor.vi v16, v16, 8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 8, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = xor <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vxor_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vxor_vi_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vxor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = xor <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+