diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -170,6 +170,17 @@ defm "" : VPatBinarySDNode_VV_VX; defm "" : VPatBinarySDNode_VV_VX; +// 12.10. Vector Single-Width Integer Multiply Instructions +defm "" : VPatBinarySDNode_VV_VX; +defm "" : VPatBinarySDNode_VV_VX; +defm "" : VPatBinarySDNode_VV_VX; + +// 12.11. Vector Integer Divide Instructions +defm "" : VPatBinarySDNode_VV_VX; +defm "" : VPatBinarySDNode_VV_VX; +defm "" : VPatBinarySDNode_VV_VX; +defm "" : VPatBinarySDNode_VV_VX; + } // Predicates = [HasStdExtV] //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll @@ -0,0 +1,805 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vdiv_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vdivu.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv64i8: +; CHECK: 
# %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, 
e16,m2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vdivu.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vdiv_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, 
i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vdiv_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vdiv_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vdiv_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vdivu.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vdiv_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv16i32_0( %va) { +; CHECK-LABEL: 
vdiv_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vdiv_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vdivu.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vdiv_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vdivu.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vdiv_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vdivu.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: 
addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vdivu.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vdiv_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vdivu.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll @@ -0,0 +1,777 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vdiv_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; 
CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv32i8_0( %va) { +; 
CHECK-LABEL: vdiv_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vdivu.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; 
CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vdivu.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; 
CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vdivu.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vdiv_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vdiv_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vdiv_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vdivu.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vdiv_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv4i64_0: +; CHECK: # %bb.0: +; 
CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vdiv_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vdivu.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = sdiv %va, %vb + ret %vc +} + +define @vdiv_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vdiv_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + +define @vdiv_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vdiv_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vdivu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = sdiv %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll @@ -0,0 +1,805 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vdivu_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: 
vdivu_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = 
insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vdiv.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + 
+define @vdivu_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vdiv.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vdivu_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vdivu_vx_nxv1i32: +; CHECK: # %bb.0: 
+; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vdivu_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vdivu_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vdiv.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = udiv %va, %vb + ret %vc +} + +define @vdivu_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vdivu_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vdivu_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vdiv.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = udiv %va, %splat + ret %vc +} + +define @vdivu_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vdivu_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; 
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vdivu.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 16 x i32> %va, %vb
+ ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vdivu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
+; CHECK-LABEL: vdivu_vx_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = udiv <vscale x 16 x i32> %va, %splat
+ ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vdivu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vdivu_vi_nxv16i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = udiv <vscale x 16 x i32> %va, %splat
+ ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 1 x i64> @vdivu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 1 x i64> %va, %vb
+ ret <vscale x 1 x i64> %vc
+}
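+
+; Note: the i64 scalar does not fit in a single GPR on RV32, so the .vx cases
+; below build the splat from the two 32-bit halves with shifts and an or, and
+; then use the .vv form of the divide.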
+define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vdivu_vx_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v25, v25, a1
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vsll.vx v26, v26, a1
+; CHECK-NEXT: vsrl.vx v26, v26, a1
+; CHECK-NEXT: vor.vv v25, v26, v25
+; CHECK-NEXT: vdivu.vv v16, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i64> %va, %splat
+ ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vdivu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vdivu_vi_nxv1i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i64> %va, %splat
+ ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vdivu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 2 x i64> %va, %vb
+ ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vdivu_vx_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v26, v26, a1
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vsll.vx v28, v28, a1
+; CHECK-NEXT: vsrl.vx v28, v28, a1
+; CHECK-NEXT: vor.vv v26, v28, v26
+; CHECK-NEXT: vdivu.vv v16, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i64> %va, %splat
+ ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vdivu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vdivu_vi_nxv2i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i64> %va, %splat
+ ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vdivu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 4 x i64> %va, %vb
+ ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vdivu_vx_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v28, v28, a1
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vsrl.vx v8, v8, a1
+; CHECK-NEXT: vor.vv v28, v8, v28
+; CHECK-NEXT: vdivu.vv v16, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i64> %va, %splat
+ ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vdivu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vdivu_vi_nxv4i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i64> %va, %splat
+ ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vdivu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vdivu.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vdivu_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vdivu.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vdivu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vdivu_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i64> %vc
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv64.ll
@@ -0,0 +1,777 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
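+
+; Unlike RV32, an i64 scalar fits in a single GPR here, so the i64 .vx cases
+; in this file can use vdivu.vx directly.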
+
+define <vscale x 1 x i8> @vdivu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 1 x i8> %va, %vb
+ ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vdivu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i8> %va, %splat
+ ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vdivu_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vdivu_vi_nxv1i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i8> %va, %splat
+ ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vdivu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 2 x i8> %va, %vb
+ ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vdivu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i8> %va, %splat
+ ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vdivu_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vdivu_vi_nxv2i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i8> %va, %splat
+ ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vdivu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 4 x i8> %va, %vb
+ ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vdivu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i8> %va, %splat
+ ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vdivu_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vdivu_vi_nxv4i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i8> %va, %splat
+ ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vdivu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vdivu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vdivu_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vdivu_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vdivu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 16 x i8> %va, %vb
+ ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vdivu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = udiv <vscale x 16 x i8> %va, %splat
+ ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vdivu_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vdivu_vi_nxv16i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = udiv <vscale x 16 x i8> %va, %splat
+ ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vdivu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 32 x i8> %va, %vb
+ ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vdivu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = udiv <vscale x 32 x i8> %va, %splat
+ ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vdivu_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vdivu_vi_nxv32i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = udiv <vscale x 32 x i8> %va, %splat
+ ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vdivu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vdivu.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 64 x i8> %va, %vb
+ ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vdivu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %vc = udiv <vscale x 64 x i8> %va, %splat
+ ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vdivu_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vdivu_vi_nxv64i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %vc = udiv <vscale x 64 x i8> %va, %splat
+ ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 1 x i16> @vdivu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 1 x i16> %va, %vb
+ ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vdivu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i16> %va, %splat
+ ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vdivu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vdivu_vi_nxv1i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i16> %va, %splat
+ ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vdivu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 2 x i16> %va, %vb
+ ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vdivu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i16> %va, %splat
+ ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vdivu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vdivu_vi_nxv2i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i16> %va, %splat
+ ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vdivu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 4 x i16> %va, %vb
+ ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vdivu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i16> %va, %splat
+ ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vdivu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vdivu_vi_nxv4i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i16> %va, %splat
+ ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vdivu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vdivu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vdivu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vdivu_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vdivu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 16 x i16> %va, %vb
+ ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vdivu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = udiv <vscale x 16 x i16> %va, %splat
+ ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vdivu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vdivu_vi_nxv16i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = udiv <vscale x 16 x i16> %va, %splat
+ ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vdivu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vdivu.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 32 x i16> %va, %vb
+ ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vdivu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = udiv <vscale x 32 x i16> %va, %splat
+ ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vdivu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vdivu_vi_nxv32i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = udiv <vscale x 32 x i16> %va, %splat
+ ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 1 x i32> @vdivu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 1 x i32> %va, %vb
+ ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vdivu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i32> %va, %splat
+ ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vdivu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vdivu_vi_nxv1i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i32> %va, %splat
+ ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vdivu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 2 x i32> %va, %vb
+ ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vdivu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i32> %va, %splat
+ ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vdivu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vdivu_vi_nxv2i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i32> %va, %splat
+ ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vdivu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 4 x i32> %va, %vb
+ ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vdivu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i32> %va, %splat
+ ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vdivu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vdivu_vi_nxv4i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i32> %va, %splat
+ ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vdivu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vdivu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vdivu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vdivu_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vdivu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vdivu.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 16 x i32> %va, %vb
+ ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vdivu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vdivu_vx_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = udiv <vscale x 16 x i32> %va, %splat
+ ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vdivu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vdivu_vi_nxv16i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = udiv <vscale x 16 x i32> %va, %splat
+ ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 1 x i64> @vdivu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 1 x i64> %va, %vb
+ ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vdivu_vx_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i64> %va, %splat
+ ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vdivu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vdivu_vi_nxv1i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = udiv <vscale x 1 x i64> %va, %splat
+ ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vdivu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 2 x i64> %va, %vb
+ ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vdivu_vx_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i64> %va, %splat
+ ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vdivu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vdivu_vi_nxv2i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = udiv <vscale x 2 x i64> %va, %splat
+ ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vdivu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: vdivu.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 4 x i64> %va, %vb
+ ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vdivu_vx_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i64> %va, %splat
+ ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vdivu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vdivu_vi_nxv4i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = udiv <vscale x 4 x i64> %va, %splat
+ ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vdivu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: vdivu_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vdivu.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = udiv <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vdivu_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vdivu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vdivu_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = udiv <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i64> %vc
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv32.ll
@@ -0,0 +1,805 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 1 x i8> %va, %vb
+ ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i8> %va, %splat
+ ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vmul_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv1i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i8> %va, %splat
+ ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vmul_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 2 x i8> %va, %vb
+ ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i8> %va, %splat
+ ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vmul_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv2i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i8> %va, %splat
+ ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vmul_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 4 x i8> %va, %vb
+ ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i8> %va, %splat
+ ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vmul_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv4i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i8> %va, %splat
+ ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vmul_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vmul_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vmul_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 16 x i8> %va, %vb
+ ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i8> %va, %splat
+ ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vmul_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv16i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i8> %va, %splat
+ ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vmul_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 32 x i8> %va, %vb
+ ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = mul <vscale x 32 x i8> %va, %splat
+ ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vmul_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv32i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = mul <vscale x 32 x i8> %va, %splat
+ ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vmul_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmul.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 64 x i8> %va, %vb
+ ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %vc = mul <vscale x 64 x i8> %va, %splat
+ ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vmul_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv64i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %vc = mul <vscale x 64 x i8> %va, %splat
+ ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 1 x i16> @vmul_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 1 x i16> %va, %vb
+ ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i16> %va, %splat
+ ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vmul_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv1i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i16> %va, %splat
+ ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vmul_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 2 x i16> %va, %vb
+ ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i16> %va, %splat
+ ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vmul_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv2i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i16> %va, %splat
+ ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vmul_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 4 x i16> %va, %vb
+ ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i16> %va, %splat
+ ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vmul_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv4i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i16> %va, %splat
+ ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vmul_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vmul_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vmul_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 16 x i16> %va, %vb
+ ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i16> %va, %splat
+ ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vmul_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv16i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i16> %va, %splat
+ ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vmul_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vmul.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 32 x i16> %va, %vb
+ ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vmul_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = mul <vscale x 32 x i16> %va, %splat
+ ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vmul_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv32i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = mul <vscale x 32 x i16> %va, %splat
+ ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 1 x i32> @vmul_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
+; CHECK-LABEL: vmul_vv_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 1 x i32> %va, %vb
+ ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vmul_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
+; CHECK-LABEL: vmul_vx_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i32> %va, %splat
+ ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vmul_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vmul_vi_nxv1i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i32> %va, %splat
+ ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vmul_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vmul_vv_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 2 x i32> %va, %vb
+ ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vmul_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
+; CHECK-LABEL: vmul_vx_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i32> %va, %splat
+ ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vmul_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vmul_vi_nxv2i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i32> %va, %splat
+ ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vmul_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
+; CHECK-LABEL: vmul_vv_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 4 x i32> %va, %vb
+ ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vmul_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
+; CHECK-LABEL: vmul_vx_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i32> %va, %splat
+ ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vmul_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vmul_vi_nxv4i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i32> %va, %splat
+ ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: vmul_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vmul_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: vmul_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vmul_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vmul_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vmul_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
+; CHECK-LABEL: vmul_vv_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmul.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 16 x i32> %va, %vb
+ ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vmul_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
+; CHECK-LABEL: vmul_vx_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i32> %va, %splat
+ ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vmul_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vmul_vi_nxv16i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
+ %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i32> %va, %splat
+ ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 1 x i64> @vmul_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
+; CHECK-LABEL: vmul_vv_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 1 x i64> %va, %vb
+ ret <vscale x 1 x i64> %vc
+}
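+
+; As with the divides, the i64 .vx cases below splat the scalar from two
+; 32-bit GPR halves (shifts plus an or) and then use vmul.vv on the splat.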
+define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vmul_vx_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v25, v25, a1
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vsll.vx v26, v26, a1
+; CHECK-NEXT: vsrl.vx v26, v26, a1
+; CHECK-NEXT: vor.vv v25, v26, v25
+; CHECK-NEXT: vmul.vv v16, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i64> %va, %splat
+ ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vmul_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv1i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i64> %va, %splat
+ ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
+; CHECK-LABEL: vmul_vv_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 2 x i64> %va, %vb
+ ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vmul_vx_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v26, v26, a1
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vsll.vx v28, v28, a1
+; CHECK-NEXT: vsrl.vx v28, v28, a1
+; CHECK-NEXT: vor.vv v26, v28, v26
+; CHECK-NEXT: vmul.vv v16, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i64> %va, %splat
+ ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vmul_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv2i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i64> %va, %splat
+ ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
+; CHECK-LABEL: vmul_vv_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 4 x i64> %va, %vb
+ ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vmul_vx_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v28, v28, a1
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vsrl.vx v8, v8, a1
+; CHECK-NEXT: vor.vv v28, v8, v28
+; CHECK-NEXT: vmul.vv v16, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i64> %va, %splat
+ ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vmul_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv4i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i64> %va, %splat
+ ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: vmul_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmul.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vmul_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmul.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vmul_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i64> %vc
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv64.ll
@@ -0,0 +1,777 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 1 x i8> %va, %vb
+ ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i8> %va, %splat
+ ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vmul_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv1i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i8> %va, %splat
+ ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vmul_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 2 x i8> %va, %vb
+ ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i8> %va, %splat
+ ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vmul_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv2i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i8> %va, %splat
+ ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vmul_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 4 x i8> %va, %vb
+ ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i8> %va, %splat
+ ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vmul_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv4i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i8> %va, %splat
+ ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vmul_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vmul_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vmul_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 16 x i8> %va, %vb
+ ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i8> %va, %splat
+ ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vmul_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv16i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i8> %va, %splat
+ ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vmul_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 32 x i8> %va, %vb
+ ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = mul <vscale x 32 x i8> %va, %splat
+ ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vmul_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv32i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %vc = mul <vscale x 32 x i8> %va, %splat
+ ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vmul_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
+; CHECK-LABEL: vmul_vv_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmul.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 64 x i8> %va, %vb
+ ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %vc = mul <vscale x 64 x i8> %va, %splat
+ ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vmul_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vmul_vi_nxv64i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
+ %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %vc = mul <vscale x 64 x i8> %va, %splat
+ ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 1 x i16> @vmul_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 1 x i16> %va, %vb
+ ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i16> %va, %splat
+ ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vmul_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv1i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = mul <vscale x 1 x i16> %va, %splat
+ ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vmul_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 2 x i16> %va, %vb
+ ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i16> %va, %splat
+ ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vmul_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv2i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = mul <vscale x 2 x i16> %va, %splat
+ ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vmul_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v17
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 4 x i16> %va, %vb
+ ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i16> %va, %splat
+ ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vmul_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv4i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = mul <vscale x 4 x i16> %va, %splat
+ ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vmul_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v18
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vmul_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = mul <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vmul_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT: vmul.vv v16, v16, v20
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 16 x i16> %va, %vb
+ ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmul_vx_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i16> %va, %splat
+ ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vmul_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vmul_vi_nxv16i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -7
+; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT: vmul.vx v16, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
+ %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %vc = mul <vscale x 16 x i16> %va, %splat
+ ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vmul_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
+; CHECK-LABEL: vmul_vv_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vmul.vv v16, v16, v8
+; CHECK-NEXT: ret
+ %vc = mul <vscale x 32 x i16> %va, %vb
%vc +} + +define @vmul_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vmul_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vmul_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vmul_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmul.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = mul %va, %vb + ret %vc +} + +define @vmul_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vmul_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vmul_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vmul_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vmul.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = mul %va, %vb + ret %vc +} + +define @vmul_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vmul_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vmul_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vmul_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vmul.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = mul %va, %vb + ret %vc +} + +define @vmul_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vmul_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vmul_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vv_nxv8i32( %va, %vb) { +; 
CHECK-LABEL: vmul_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmul.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = mul %va, %vb + ret %vc +} + +define @vmul_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vmul_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vmul_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vmul_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vmul.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = mul %va, %vb + ret %vc +} + +define @vmul_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vmul_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vmul_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vmul_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vmul.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = mul %va, %vb + ret %vc +} + +define @vmul_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vmul_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vmul_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vmul_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vmul.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = mul %va, %vb + ret %vc +} + +define @vmul_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vmul_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vmul_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; 
CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vmul_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vmul.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = mul %va, %vb + ret %vc +} + +define @vmul_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vmul_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vmul_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vmul_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmul.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = mul %va, %vb + ret %vc +} + +define @vmul_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vmul_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + +define @vmul_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vmul_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmul.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = mul %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll @@ -0,0 +1,805 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vrem_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem 
%va, %vb + ret %vc +} + +define @vrem_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: 
vrem_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vrem.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; 
CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vrem.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat 
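; The CHECK lines in this file are maintained by the script named in the
; header NOTE; after a codegen change they can be regenerated with something
; like the following from the monorepo root (the llc path depends on the
; local build tree):
;   llvm/utils/update_llc_test_checks.py --llc-binary $BUILD/bin/llc \
;     llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll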
+ ret %vc +} + +define @vrem_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vrem_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vrem_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vrem_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vrem_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vrem.vx 
v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vrem.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vrem_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vrem_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vrem.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vrem_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vrem_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vrem.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vrem_vi_nxv2i64_0: +; CHECK: # 
%bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vrem_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vrem.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vrem_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vrem.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vrem_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vrem.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vrem_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll @@ -0,0 +1,777 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vrem_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + 
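; In contrast with the rv32 file above, every vrem_vx_*i64 test in this rv64
; file selects a single vrem.vx, since an i64 scalar fits in one GPR on
; riscv64. On rv32 the 64-bit splat had to be assembled from the two halves
; of the argument (lo in a0, hi in a1 per the RV32 calling convention); the
; vsll/vsrl/vor sequences checked there compute
;   splat = (splat(a1) << 32) | ((splat(a0) << 32) >> 32)
; i.e. the high word shifted into position, or'd with the zero-extended low
; word.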
%vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; 
CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vrem.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vrem_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vrem_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; 
CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + 
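; The i8/i16/i32 scalar operands in this rv64 file are marked signext so the
; .vx form receives a properly sign-extended GPR; i64 needs no attribute at
; XLEN=64, and the rv32 files annotate only i8/i16 since i32 is already XLEN
; there. A representative signature, with the scalable type written out in
; full (reconstructed; cf. the nxv1i32 test below), is:
;   define <vscale x 1 x i32> @vrem_vx_nxv1i32(<vscale x 1 x i32> %va,
;                                              i32 signext %b)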
ret %vc +} + +define @vrem_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vrem.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vrem_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vrem_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vrem_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vrem_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vrem_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; 
CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vrem_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vrem.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vrem_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vrem_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vrem_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vrem_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vrem_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = 
shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vrem_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vrem.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vrem_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vrem_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vrem_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vrem.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = srem %va, %vb + ret %vc +} + +define @vrem_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vrem_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + +define @vrem_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vrem_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vrem.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = srem %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode-rv32.ll @@ -0,0 +1,804 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vremu_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vremu_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vremu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = urem %va, %vb + ret %vc +} + +define @vremu_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vremu_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vremu_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 
-7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vremu_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vremu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = urem %va, %vb + ret %vc +} + +define @vremu_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vremu_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vremu_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vremu_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vremu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = urem %va, %vb + ret %vc +} + +define @vremu_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vremu_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vremu_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vremu_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vremu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = urem %va, %vb + ret %vc +} + +define @vremu_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vremu_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vremu_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vremu_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vremu.vv v16, v16, v18 +; CHECK-NEXT: ret + %vc = urem %va, %vb + ret %vc +} + +define @vremu_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vremu_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vremu_vi_nxv16i8_0: +; 
CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vremu_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vremu.vv v16, v16, v20 +; CHECK-NEXT: ret + %vc = urem %va, %vb + ret %vc +} + +define @vremu_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vremu_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vremu_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vremu_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vremu.vv v16, v16, v8 +; CHECK-NEXT: ret + %vc = urem %va, %vb + ret %vc +} + +define @vremu_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vremu_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vremu_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vremu_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vremu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = urem %va, %vb + ret %vc +} + +define @vremu_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vremu_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vremu_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vremu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -7, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = urem %va, %splat + ret %vc +} + +define @vremu_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vremu_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vremu.vv v16, v16, v17 +; CHECK-NEXT: ret + %vc = urem %va, %vb + ret %vc +} + +define @vremu_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vremu_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vremu.vx v16, 
+define <vscale x 64 x i8> @vremu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
+; CHECK-LABEL: vremu_vv_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vremu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 64 x i8> %va, %vb
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vremu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = urem <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vremu_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vremu_vi_nxv64i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = urem <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 1 x i16> @vremu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 1 x i16> %va, %vb
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vremu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vremu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv1i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vremu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 2 x i16> %va, %vb
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vremu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vremu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv2i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vremu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 4 x i16> %va, %vb
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vremu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vremu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv4i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vremu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vremu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vremu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vremu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 16 x i16> %va, %vb
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vremu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vremu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv16i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vremu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vremu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 32 x i16> %va, %vb
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vremu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = urem <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vremu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv32i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = urem <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 1 x i32> @vremu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 1 x i32> %va, %vb
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vremu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
+; CHECK-LABEL: vremu_vx_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vremu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv1i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vremu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 2 x i32> %va, %vb
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vremu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
+; CHECK-LABEL: vremu_vx_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vremu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv2i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vremu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 4 x i32> %va, %vb
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vremu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
+; CHECK-LABEL: vremu_vx_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vremu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv4i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vremu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vremu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: vremu_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vremu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vremu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vremu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 16 x i32> %va, %vb
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vremu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
+; CHECK-LABEL: vremu_vx_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vremu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv16i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
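+; On RV32 an i64 scalar is passed in a register pair (a0 = low half, a1 =
+; high half), so the vx tests below assemble the 64-bit splat with
+; vsll/vsrl/vor and then fall back to the vv form of the instruction.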
+define <vscale x 1 x i64> @vremu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
+; CHECK-LABEL: vremu_vv_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 1 x i64> %va, %vb
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vremu_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v25, v25, a1
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vsrl.vx v26, v26, a1
+; CHECK-NEXT:    vor.vv v25, v26, v25
+; CHECK-NEXT:    vremu.vv v16, v16, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
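+; A 32-bit immediate still works at SEW=64 because the scalar operand of a
+; .vx instruction is sign-extended to SEW, so -7 splats correctly here.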
+define <vscale x 1 x i64> @vremu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vremu_vi_nxv1i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vremu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
+; CHECK-LABEL: vremu_vv_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 2 x i64> %va, %vb
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vremu_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vsrl.vx v28, v28, a1
+; CHECK-NEXT:    vor.vv v26, v28, v26
+; CHECK-NEXT:    vremu.vv v16, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vremu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vremu_vi_nxv2i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vremu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
+; CHECK-LABEL: vremu_vv_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 4 x i64> %va, %vb
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vremu_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vsll.vx v8, v8, a1
+; CHECK-NEXT:    vsrl.vx v8, v8, a1
+; CHECK-NEXT:    vor.vv v28, v8, v28
+; CHECK-NEXT:    vremu.vv v16, v16, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vremu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vremu_vi_nxv4i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vremu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: vremu_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vremu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vremu_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v8, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v8, v8, a1
+; CHECK-NEXT:    vmv.v.x v24, a0
+; CHECK-NEXT:    vsll.vx v24, v24, a1
+; CHECK-NEXT:    vsrl.vx v24, v24, a1
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    vremu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vremu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vremu_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode-rv64.ll
@@ -0,0 +1,777 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vremu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
+; CHECK-LABEL: vremu_vv_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 1 x i8> %va, %vb
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vremu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vremu_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vremu_vi_nxv1i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vremu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vremu_vv_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 2 x i8> %va, %vb
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vremu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vremu_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vremu_vi_nxv2i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vremu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
+; CHECK-LABEL: vremu_vv_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 4 x i8> %va, %vb
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vremu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vremu_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vremu_vi_nxv4i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vremu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: vremu_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vremu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vremu_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vremu_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vremu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
+; CHECK-LABEL: vremu_vv_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 16 x i8> %va, %vb
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vremu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vremu_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vremu_vi_nxv16i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vremu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
+; CHECK-LABEL: vremu_vv_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 32 x i8> %va, %vb
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vremu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = urem <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vremu_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vremu_vi_nxv32i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = urem <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
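+; As in the RV32 file, the second LMUL=8 operand arrives through memory.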
+define <vscale x 64 x i8> @vremu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
+; CHECK-LABEL: vremu_vv_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vremu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 64 x i8> %va, %vb
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vremu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = urem <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vremu_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vremu_vi_nxv64i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = urem <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 1 x i16> @vremu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 1 x i16> %va, %vb
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vremu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vremu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv1i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vremu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 2 x i16> %va, %vb
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vremu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vremu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv2i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vremu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 4 x i16> %va, %vb
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vremu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vremu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv4i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vremu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vremu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vremu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vremu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 16 x i16> %va, %vb
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vremu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vremu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv16i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vremu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
+; CHECK-LABEL: vremu_vv_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vremu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 32 x i16> %va, %vb
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vremu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = urem <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vremu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vremu_vi_nxv32i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = urem <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 1 x i32> @vremu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 1 x i32> %va, %vb
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vremu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vremu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv1i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vremu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 2 x i32> %va, %vb
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vremu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vremu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv2i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vremu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 4 x i32> %va, %vb
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vremu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vremu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv4i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vremu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vremu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vremu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vremu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
+; CHECK-LABEL: vremu_vv_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vremu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 16 x i32> %va, %vb
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vremu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vremu_vx_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vremu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vremu_vi_nxv16i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = urem <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
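+; On RV64 the i64 scalar fits in a single GPR, so vremu.vx is used directly,
+; unlike the shift/or splat sequence required in the RV32 file.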
+define <vscale x 1 x i64> @vremu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
+; CHECK-LABEL: vremu_vv_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 1 x i64> %va, %vb
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vremu_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vremu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vremu_vi_nxv1i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = urem <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vremu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
+; CHECK-LABEL: vremu_vv_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 2 x i64> %va, %vb
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vremu_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vremu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vremu_vi_nxv2i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = urem <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vremu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
+; CHECK-LABEL: vremu_vv_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vremu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 4 x i64> %va, %vb
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vremu_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vremu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vremu_vi_nxv4i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = urem <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vremu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: vremu_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vremu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = urem <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vremu_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vremu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vremu_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vremu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = urem <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}