diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -343,6 +343,9 @@ // RVV intrinsics may have illegal operands. for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) setOperationAction(ISD::INTRINSIC_WO_CHAIN, VT, Custom); + + for (auto VT : MVT::integer_scalable_vector_valuetypes()) + setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); } // Function alignments. diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -365,29 +365,57 @@ //===----------------------------------------------------------------------===// // Helpers to define the different patterns. //===----------------------------------------------------------------------===// -class VPatBinarySDNode : +class VPatBinarySDNode_VV : Pat<(result_type (vop (op_type op_reg_class:$rs1), (op_type op_reg_class:$rs2))), - (!cast(instruction_name#"_VV_"# vlmul.MX) + (!cast(instruction_name#"_VV_"# vlmul.MX) op_reg_class:$rs1, op_reg_class:$rs2, VLMax, sew)>; -multiclass VPatBinarySDNode +class VPatBinarySDNode_XI : + Pat<(result_type (vop + (vop_type vop_reg_class:$rs1), + (vop_type (splat_vector xop_kind:$rs2)))), + (!cast(instruction_name#_#suffix#_# vlmul.MX) + vop_reg_class:$rs1, + xop_kind:$rs2, + VLMax, sew)>; + +multiclass VPatBinarySDNode_VV_VX_VI { - foreach vti = AllIntegerVectors in - def : VPatBinarySDNode; + foreach vti = AllIntegerVectors in { + def : VPatBinarySDNode_VV; + def : VPatBinarySDNode_XI; + def : VPatBinarySDNode_XI; + } } class VPatBinaryNoMask; +defm "" : VPatBinarySDNode_VV_VX_VI; //===----------------------------------------------------------------------===// // 12. 
Vector Integer Arithmetic Instructions diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll @@ -0,0 +1,650 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vadd_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = 
shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc 
= add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, 
%splat + ret %vc +} + +define @vadd_vx_nxv1i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, 
%splat + ret %vc +} + +define @vadd_vx_nxv4i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, 
%splat + ret %vc +} + +define @vadd_vx_nxv16i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, 
%splat + ret %vc +} + +define @vadd_vx_nxv1i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} 
+ +define @vadd_vx_nxv4i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define 
@vadd_vx_nxv16i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv64.ll @@ -0,0 +1,794 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vadd_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + 
%vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + 
ret %vc +} + +define @vadd_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define 
@vadd_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define 
; Tests for selecting integer ADD of a vector and a splatted scalar/immediate:
; a splat of a register operand must select vadd.vx, and a splat of a 5-bit
; immediate (-1, 2) must select vadd.vi, across all integer element types and
; LMULs. Reconstructed from the garbled diff text: <vscale x N x iM> types were
; restored from the nxvNiM function names and the vsetvli eM,LMUL CHECK lines.

define <vscale x 1 x i16> @vadd_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> undef, i16 -1, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vadd_vx_nxv1i16_1(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv1i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> undef, i16 2, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vadd_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vadd_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> undef, i16 -1, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vadd_vx_nxv2i16_1(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv2i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> undef, i16 2, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vadd_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vadd_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> undef, i16 -1, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vadd_vx_nxv4i16_1(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv4i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> undef, i16 2, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vadd_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vadd_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> undef, i16 -1, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vadd_vx_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv8i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> undef, i16 2, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vadd_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
  %vc = add <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vadd_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> undef, i16 -1, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
  %vc = add <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vadd_vx_nxv16i16_1(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv16i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> undef, i16 2, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
  %vc = add <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vadd_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
  %vc = add <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vadd_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> undef, i16 -1, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
  %vc = add <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vadd_vx_nxv32i16_1(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv32i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> undef, i16 2, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
  %vc = add <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 1 x i32> @vadd_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vadd_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 -1, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vadd_vx_nxv1i32_1(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv1i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 2, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vadd_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vadd_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 -1, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vadd_vx_nxv2i32_1(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv2i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 2, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vadd_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vadd_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 -1, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vadd_vx_nxv4i32_1(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv4i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 2, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 -1, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vx_nxv8i32_1(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv8i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 2, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 16 x i32> @vadd_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %vc = add <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vadd_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> undef, i32 -1, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %vc = add <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vadd_vx_nxv16i32_1(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv16i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> undef, i32 2, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %vc = add <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

; NOTE(review): on riscv32 the i64 scalar %b arrives in a register pair, yet the
; CHECK lines below (taken verbatim from the garbled source) expect a single
; vadd.vx with a0 — confirm against a regenerated run of update_llc_test_checks.
define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vadd_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> undef, i64 -1, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vadd_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv1i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vadd_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> undef, i64 -1, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vadd_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv2i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vadd_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> undef, i64 -1, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vadd_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv4i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; CHECK-NEXT:    vadd.vx v16, v16, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vadd_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, -1
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vadd_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT:    vadd.vi v16, v16, 2
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}