diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll
@@ -0,0 +1,79924 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv1i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv1i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i32>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv2i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv2i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i16>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv4i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv4i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv32i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i16>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv32i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv32i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv32i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i32>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv1i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv1i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i16>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv8i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv8i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv8i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv8i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i32>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv8i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv8i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv64i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 64 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv64i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv64i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv64i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv4i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv4i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i16>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv1i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv1i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv32i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv32i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv32i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv32i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv2i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv2i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i16>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv2i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv2i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i32>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg2_nxv16i16_nxv4i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv4i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index,
i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv32i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv32i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32(,,,, i8*, , i32) +declare void 
@llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg4.nxv1i8.nxv8i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv64i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv64i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv32i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv32i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv1i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i8( %val, %val, %val, 
%val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv32i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv32i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; 
CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg5.nxv1i8.nxv8i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv64i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv64i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; 
CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv32i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv32i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define 
void @test_vsuxseg5_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv1i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg6.nxv1i8.nxv1i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv2i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv32i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv32i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t 
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg6_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg6_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg6_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call
void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg6_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg6_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i8(,,,,,,
i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv1i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; 
CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, 
v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv32i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv32i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv64i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv64i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i8(,,,,,,, i8*, , , i32) + 
+define void @test_vsuxseg7_nxv1i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv32i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv32i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i16(,,,,,,, 
i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv1i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* 
%base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv32i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv32i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v 
v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv8i16( 
%val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, 
%val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv64i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv64i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv32i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv32i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, 
%val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv1i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # 
kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv4i16( %val, i8* %base, 
%index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv32i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv32i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv32i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg2_mask_nxv16i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv64i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv64i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv64i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv64i8( 
%val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv32i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv32i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv32i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg2_mask_nxv16i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg2_mask_nxv16i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv16i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg3_mask_nxv16i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv32i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv32i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv32i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv32i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; 
CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv64i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv64i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv64i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: 
vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv64i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv32i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv32i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv32i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv32i8( %val, %val, %val, i8* %base, %index, %mask, i32 
%vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i32(,,, i8*, 
, i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv16i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv16i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8(,,,, i8*, , i32) 
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv16i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv16i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + 
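+; A note on the CHECK pattern in these autogenerated tests: the same %val is
+; passed for every field of the segment tuple, so the compiler must first copy
+; it into a consecutive register group (v0..v7 at LMUL=1, v2/v4/v6/v8 or
+; v8m2_v10m2_v12m2_v14m2 at LMUL=2) before issuing the
+; vsuxseg{2,3,4,8}ei{8,16,32}.v store; masked variants start the group above
+; v0 because v0 carries the mask. The vmv1r.v/vmv2r.v copies and the
+; "# kill:" annotations are artifacts of forming that register group, not of
+; the store itself.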
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv32i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv32i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv16i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv16i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv16i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, 
v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv8i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv8i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv8i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv8i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv64i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv64i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv64i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv64i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv4i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv4i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv1i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv1i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv32i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv32i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv32i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv32i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv2i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv2i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv2i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv2i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, <vscale x 16 x i1>, i32)
+
+define void @test_vsuxseg4_nxv16i8_nxv4i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv16i8_nxv4i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void
@test_vsuxseg2_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv4i16( %val, 
i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv32i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv32i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv32i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv32i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg2_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv64i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv64i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv64i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v 
v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv64i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv32i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv32i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv32i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu 
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv32i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), 
v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv2i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i8( %val, %val, %val, 
i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv32i16(,,, i32*, , i32) +declare void 
@llvm.riscv.vsuxseg3.mask.nxv2i32.nxv32i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv32i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv32i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i8(,,, i32*, , , i32) + +define void 
@test_vsuxseg3_nxv2i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv64i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv64i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv64i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv64i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv4i8( %val, 
i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv32i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv32i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv32i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv32i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv2i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv16i8: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv32i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv32i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv32i16( %val, i32* 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg4.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i8(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv64i8(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:
vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv32i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv32i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; 
CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv2i16( %val, 
i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv2i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg5_nxv2i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i16(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv32i16(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
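+; The masked form keeps the segment tuple in v1..v5 instead of v0..v4:
+; v0 must stay live to supply the mask consumed by the v0.t store above.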
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i32(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i32(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv8i32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv64i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv64i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: 
vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv32i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv32i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i32(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i32(,,,,, i32*, , , i32) + +define void 
@test_vsuxseg5_nxv2i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i32(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i32(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv2i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: 
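+; All six segment operands of the call below are the same vector, so the
+; checks above expect the {v0..v5} tuple to be assembled with vmv1r.v
+; whole-register copies before the indexed segment store is issued.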
+  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i8(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg6_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg6_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i16(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, i32)
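+; In the nxv32i16 index tests that follow, the index operand needs an
+; LMUL=8 register group (v16..v23), so the value tuple can be formed in
+; place at v8..v13 and the checks expect the "# kill" annotation rather
+; than a copy of the value away from v8.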
+declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv32i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i32(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i32(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv8i16( %val, i32* %base, 
%index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i32(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i32(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv64i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv64i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv64i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv32i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv32i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i32(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i32(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: 
vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i32(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i32(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv2i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, 
(a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv32i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv32i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i32(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: 
vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i32(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv64i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv64i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv32i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv32i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i32(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i32(,,,,,,, i32*, , i32) +declare 
void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv2i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg8.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, 
v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv32i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv32i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i32(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i32(,,,,,,,, 
i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i32(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i32(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv64i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv64i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; 
CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv32i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv32i8(,,,,,,,, i32*, , , 
i32) + +define void @test_vsuxseg8_nxv2i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i32(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i32(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 
+; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i32(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i32(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv2i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv4i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv4i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv4i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; 
CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i32(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i32(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv4i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i32( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv32i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv32i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv4i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv32i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; 
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv32i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv64i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv32i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg5_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv32i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv64i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv32i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg6_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv32i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg7_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv64i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv4i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv32i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv32i8(,,,,,,, i16*, , , i32) + +define void 
@test_vsuxseg7_nxv4i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv4i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv4i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg7_mask_nxv4i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv4i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + 
ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; 
CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv32i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv32i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, 
i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 
+; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv64i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv64i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv32i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv32i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret 
void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: 
vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, 
(a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: 
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv32i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv32i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv32i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv32i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg2.nxv1i32.nxv64i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv64i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv64i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv64i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv32i8(,, 
i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv32i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv32i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv32i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i16(,, i32*, , , i32) + +define void 
@test_vsuxseg2_nxv1i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv1i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv1i8( 
%val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 
+; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv32i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv32i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv32i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv32i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv64i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv64i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv64i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv64i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv32i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv32i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg3.nxv1i32.nxv32i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv32i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i16( %val, %val, 
%val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv1i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; 
CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv32i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv32i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv64i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv64i8(,,,, i32*, , , i32) + +define void 
@test_vsuxseg4_nxv1i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg4.nxv1i32.nxv32i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv32i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; 
CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv1i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i32(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i32(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg5.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv32i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv32i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i32) + +define void 
@test_vsuxseg5_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i32(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i32(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv64i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv64i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv4i8( %val, i32* %base, 
%index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv32i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv32i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i8(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i8(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i32(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i32(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i16(,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i16(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i32(,,,,, i32*, , i32) 
+declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i32(,,,,, i32*, , , i32) + +define void @test_vsuxseg5_nxv1i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, 
v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i32(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i32(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv32i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv32i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i32(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i32(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg6.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv64i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv64i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare 
void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv32i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv32i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i8(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i8(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i32(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i32(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i16(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i16(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i32(,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i32(,,,,,, i32*, , , i32) + +define void @test_vsuxseg6_nxv1i32_nxv4i32( %val, i32* 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i32(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv4i16( 
%val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv32i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv32i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} 
+
+define void @test_vsuxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg7_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i16(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg7_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i8(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void 
@llvm.riscv.vsuxseg7.nxv1i32.nxv8i32(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv64i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv64i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, 
(a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv32i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv32i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i8(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i8(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i32(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i16(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i16(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i32(,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i32(,,,,,,, i32*, , , i32) + +define void @test_vsuxseg7_nxv1i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, 
v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i32(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i32(,,,,,,,, 
i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv32i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv32i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i32(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i32(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv64i8(,,,,,,,, i32*, , i32) +declare void 
@llvm.riscv.vsuxseg8.mask.nxv1i32.nxv64i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; 
CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv32i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv32i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i8(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i8(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i32(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i32(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i16(,,,,,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i16(,,,,,,,, i32*, , , i32) + +define void @test_vsuxseg8_nxv1i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, 
i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg8_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v26, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v26, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i16_nxv4i16( %val, i16* %base, 
%index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv32i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv32i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv32i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv32i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i32(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i32(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i32( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg2_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv64i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv64i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv64i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg2_mask_nxv8i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv64i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv32i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv32i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv32i8( %val, %val, i16* %base, %index, i32 
%vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv32i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i32(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i32(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i32( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv8i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i16( %val, %val, i16* %base, %index, i32 %vl) + 
ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v26, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v26, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: 
test_vsuxseg3_mask_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv32i16(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv32i16(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv8i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv32i16( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv32i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i32(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i32(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv8i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i32( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv8i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv64i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv64i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv8i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv64i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: 
vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv64i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv8i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i16(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i16(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv8i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i16( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv32i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv32i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv8i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv32i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v12, v0.t +; 
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv32i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv32i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv32i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv32i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv1i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv1i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv64i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv64i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv64i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv64i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv4i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv4i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv1i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv1i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg4_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg3_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv8i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv32i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv32i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv8i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv32i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv32i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv8i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv8i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv8i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv8i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg3_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i8( %val, %val, 
%val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv32i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv32i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv64i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv64i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv32i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv32i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv2i8( %val, i8* 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i32(,,,, i8*, , i32) +declare void 
@llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv8i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv32i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv32i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv64i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv64i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv64i8: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, 
%mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv32i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv32i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: 
vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv8i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg6.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i32(,,,,,, i8*, , 
, i32) + +define void @test_vsuxseg6_nxv8i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv32i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv32i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def 
$v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv64i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv64i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail 
call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv32i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv32i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define 
void @test_vsuxseg6_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i16(,,,,,, i8*, , , i32) + +define void 
@test_vsuxseg6_nxv8i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv8i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv32i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv32i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 
def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { 
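+; Note: the vmv1r.v chains in the checks below build the store's register tuple
+; from the single %val operand: unmasked seg7 stores replicate it into v0..v6,
+; while the masked variants use v1..v7 so that v0 stays free for the mask.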
+; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv64i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv64i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg7_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv32i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv32i8(,,,,,,, 
i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv8i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg7_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i16(,,,,,,, i8*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i16(,,,,,,, i8*, , , i32)
+
+define void @test_vsuxseg7_nxv8i8_nxv2i16( %val, i8* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg7_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i32(,,,,,,, i8*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i32(,,,,,,, i8*, , , i32)
+
+define void @test_vsuxseg7_nxv8i8_nxv4i32( %val, i8* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg7_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
+ ret void
+}
+
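+; The vsuxseg8 tests below repeat the same copy-then-store pattern with an
+; eight-register tuple (v0..v7 unmasked, v1..v8 masked).
+declare void 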
@llvm.riscv.vsuxseg8.nxv8i8.nxv16i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: 
vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv32i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv32i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16(,,,,,,,, i8*, , i32) +declare void 
@llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv64i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv64i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; 
CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv32i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv32i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv2i8( %val, i8* %base, 
%index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv8i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void 
+} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv32i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv32i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv32i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv32i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu 
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 
killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv64i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv64i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv64i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv64i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv32i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv32i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv32i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv32i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv16i32( %val, i32* %base, %index, %mask, i32 
%vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv8i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv16i16( 
%val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv32i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv32i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv32i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; 
CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv64i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv64i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv64i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i16( %val, %val, i8* %base, %index, 
%mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv32i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv32i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv32i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i16(,, i8*, , , i32) + 
+define void @test_vsuxseg2_nxv4i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { 
+; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli 
a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv32i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv32i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv32i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv32i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg3.nxv4i8.nxv8i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv64i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv64i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv64i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + 
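+; Note: in the masked tests the mask arrives in v0, the implicit mask
+; register for masked RVV instructions, so the copies that build the
+; segment tuple start at v1; the unmasked tests are free to start the
+; tuple at v0. When the index operand is wide enough that the tuple must
+; instead be built on top of v8 (for example an LMUL=8 index expected in
+; v16), the "# kill:" line records v8 being redefined as the base of the
+; wider segment tuple.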
+define void @test_vsuxseg3_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv64i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv32i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv32i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv32i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv32i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv32i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv32i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg4_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv64i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv64i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv32i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv32i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: 
vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i8(,,,,, i8*, , , i32) + +define void 
@test_vsuxseg5_nxv4i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv32i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv32i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg5_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv64i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv64i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv1i16( %val, i8* %base, 
%index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv32i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv32i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, i8* 
%base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: 
vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 
%vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv32i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv32i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg6_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv64i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv64i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg6_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv32i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv32i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: 
vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, 
%index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg7.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, 
%val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv32i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv32i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv64i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv64i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: 
vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv32i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv32i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, 
v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16(,,,,,,,, i8*, , i32) 
+declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg8_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg8_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv32i16(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg8_mask_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg8_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i32(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg8_mask_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg8_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i16(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg8_mask_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg8_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg8_mask_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv64i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv64i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg8.nxv4i8.nxv4i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv32i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv32i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu 
+; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: 
vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i16(,, i16*, , i32) +declare void 
+
+define void @test_vsuxseg2_nxv1i16_nxv16i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i16( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i8(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i8(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv16i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i8( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i32(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i32(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv2i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i32( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i16(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i16(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i16( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv32i16(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv32i16(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv32i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv32i16( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv32i16( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i16(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i16(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv8i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i16( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i8(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i8(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv8i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i8( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i32(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i32(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv8i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i32( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv64i8(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv64i8(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv64i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv64i8( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv64i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i8(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i8(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv4i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i8( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv32i8(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv32i8(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv32i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv32i8( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv32i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i8(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i8(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv2i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i8( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i32(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i32(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv16i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i32( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i16(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i16(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv2i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i16( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i32(,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i32(,, i16*, , , i32)
+
+define void @test_vsuxseg2_nxv1i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i32( %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i16(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i16(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv16i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i16( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i8(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i8(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv16i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i8( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i32(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i32(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv2i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i32( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i16(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i16(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i16( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv32i16(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv32i16(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv32i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv32i16( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv32i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i16(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i16(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv8i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i16( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i8(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i8(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv8i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i8( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i32(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i32(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv8i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i32( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv64i8(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv64i8(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv64i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv64i8( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv64i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i8(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i8(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv4i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i8( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv32i8(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv32i8(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv32i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv32i8( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv32i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i8(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i8(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv2i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i8( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i32(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i32(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv16i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i32( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i16(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i16(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv2i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i16( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i32(,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i32(,,, i16*, , , i32)
+
+define void @test_vsuxseg3_nxv1i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i32( %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i16(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i16(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv16i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i8(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i8(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv16i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i32(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i32(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv2i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i16(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i16(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv32i16(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv32i16(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv32i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i16(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i16(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv8i16( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i8(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i8(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv8i8( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i32(,,,, i16*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i32(,,,, i16*, , , i32)
+
+define void @test_vsuxseg4_nxv1i16_nxv8i32( %val, i16* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:
vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv64i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv64i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv1i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv1i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv32i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv32i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv1i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv1i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i32(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i32(,,,, i16*, , , i32) + +define 
void @test_vsuxseg4_nxv1i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i16(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i16(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv1i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i32(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i32(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv1i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret 
void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i32(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i32(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv32i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv32i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg5.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i32(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i32(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv64i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv64i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv32i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv32i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg5_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i32(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i32(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 
+; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i32(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i32(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv1i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg6.nxv1i16.nxv1i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg6_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv32i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv32i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv1i32( %val, i16* %base, %index, 
i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v 
v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv64i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv64i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv32i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv32i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv32i8( %val, 
%val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i16( 
%val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv1i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg7.nxv1i16.nxv1i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i32( %val, %val, 
%val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv32i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv32i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv64i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv64i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; 
CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv32i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv32i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: 
vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed 
$v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv1i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i16(,,,,,,,, i16*, , , i32) + +define void 
@test_vsuxseg8_nxv1i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg8.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: 
vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv32i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv32i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i16(,,,,,,,, i16*, , i32) +declare void 
@llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v 
v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv64i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv64i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv32i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv32i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv1i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i8(,,,,,,,, i16*, , i32) 
+declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg8_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i8(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg8_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i32(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg8_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg8_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i32(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i16>, <vscale x 32 x i1>, i32)
+
+define void @test_vsuxseg2_nxv32i8_nxv16i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i16(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv32i8_nxv16i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i16(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i8>, <vscale x 32 x i1>, i32)
+
+define void @test_vsuxseg2_nxv32i8_nxv1i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i8(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv32i8_nxv1i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i8(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i8>, <vscale x 32 x i1>, i32)
+
+define void @test_vsuxseg2_nxv32i8_nxv16i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i8(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv32i8_nxv16i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i8(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i32>, <vscale x 32 x i1>, i32)
+
+define void @test_vsuxseg2_nxv32i8_nxv2i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i32(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv32i8_nxv2i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 32 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i32(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 32 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i16>, <vscale x 32 x i1>, i32)
+
+define void @test_vsuxseg2_nxv32i8_nxv4i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i16(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv32i8_nxv4i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i16(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define void @test_vsuxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i32>, <vscale x 32 x i1>, i32)
+
+define void @test_vsuxseg2_nxv32i8_nxv1i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i32(<vscale x 32 x i8> %val, <vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv32i8_nxv1i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 32 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: 
def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv8i32( %val, i8* %base, %index, %mask, 
i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv64i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv64i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv64i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv1i16( %val, i8* %base, 
%index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + 
+define void @test_vsuxseg2_mask_nxv32i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv32i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv32i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i16( %val, %val, i8* %base, %index, i32 %vl) 
+ ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg2_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv32i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv32i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv32i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; 
CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv64i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv64i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv64i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i16(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv32i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv32i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv32i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16(,, i8*, , i32) 
+declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i32(,, i8*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i32(,, i8*, , , i32) + +define void @test_vsuxseg2_nxv2i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i32( %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i8(,,, i8*, , , i32) + 
+define void @test_vsuxseg3_nxv2i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; 
CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv32i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv32i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv32i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv32i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: 
vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv64i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv64i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg3.nxv2i8.nxv64i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv64i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv32i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv32i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv32i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg3_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv32i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i32(,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i32(,,, i8*, , , i32) + +define void @test_vsuxseg3_nxv2i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i32( %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv4i16( %val, i8* %base, 
%index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv32i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv32i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i16( %val, %val, %val, 
%val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv64i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv64i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; 
CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv32i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv32i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; 
CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 
%vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i32(,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i32(,,,, i8*, , , i32) + +define void @test_vsuxseg4_nxv2i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i8(,,,,, i8*, , i32) +declare void 
@llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, 
v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv32i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv32i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv1i32( %val, i8* 
%base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; 
CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv64i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv64i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i16(,,,,, i8*, , , i32) + +define void 
@test_vsuxseg5_nxv2i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv32i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv32i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i32(,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i32(,,,,, i8*, , , i32) + +define void @test_vsuxseg5_nxv2i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg5_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, 
v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv32i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv32i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg6.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv64i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv64i8(,,,,,, i8*, 
, , i32) + +define void @test_vsuxseg6_nxv2i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v 
v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv32i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv32i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: 
vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i32(,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i32(,,,,,, i8*, , , i32) + +define void @test_vsuxseg6_nxv2i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: 
vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv32i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv32i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: 
vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv64i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv64i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v 
v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv32i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv32i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: 
vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i32(,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i32(,,,,,,, i8*, , , i32) + +define void @test_vsuxseg7_nxv2i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv4i32: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg8_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + 
ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv32i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv32i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv32i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v 
v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv64i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv64i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv64i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, 
%index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv32i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv32i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv32i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: 
vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv16i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def 
$v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i32(,,,,,,,, i8*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i32(,,,,,,,, i8*, , , i32) + +define void @test_vsuxseg8_nxv2i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg2.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
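+; Note the recurring register-allocation pattern checked throughout these
+; tests: the two-register store-data tuple must occupy consecutive vector
+; registers starting at v8, while the index operand arrives in v9, inside
+; that group. The generated code therefore evacuates the index to a
+; scratch register (v25 in the checks) before completing the tuple with
+; vmv1r.v copies, as in test_vsuxseg2_nxv2i16_nxv2i32 below.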
+ +define void @test_vsuxseg2_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv32i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv32i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv32i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv32i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i32(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i32(,, i16*, , , i32) + +define void 
@test_vsuxseg2_nxv2i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i32( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i32(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i32(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv8i32( %val, i16* 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i32( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv64i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv64i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv64i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv64i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 
killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv32i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv32i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv32i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv32i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i32(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i32(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: 
vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i32( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i32(,, i16*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i32(,, i16*, , , i32) + +define void @test_vsuxseg2_nxv2i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i32( %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i16(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i16(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: 
vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i16( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32( 
%val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i16(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i16(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i16( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv32i16(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv32i16(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv32i16( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv32i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i32(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i32(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i32( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + 
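+; In the masked variants such as the one below, v0 carries the mask, so
+; the data tuple is materialized starting at v1 instead of v0 and the
+; store consumes the mask operand as v0.t.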
+define void @test_vsuxseg3_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i16(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i16(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i16( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i32(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i32(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i32( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv64i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv64i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv64i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv64i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i16(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i16(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i16( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv32i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv32i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv32i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv32i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i32(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i32(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i32( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def 
$v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i32(,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i32(,,, i16*, , , i32) + +define void @test_vsuxseg3_nxv2i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i32( %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i16(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i16(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; 
CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i16(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i16(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv32i16(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv32i16(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i32(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i32(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i32( %val, %val, 
%val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i16(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i16(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i32(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i32(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: 
vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv64i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv64i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i16(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i16(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv32i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv32i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i32(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i32(,,,, i16*, , , i32) + +define 
void @test_vsuxseg4_nxv2i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i32(,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i32(,,,, i16*, , , i32) + +define void @test_vsuxseg4_nxv2i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret 
void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv32i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv32i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg5.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i32(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i32(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i16(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i16(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i8(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i8(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv64i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv32i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg5_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i32(,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i32(,,,,, i16*, , , i32) + +define void @test_vsuxseg5_nxv2i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg6.nxv2i16.nxv1i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg6_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv32i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv32i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv1i32( %val, i16* %base, %index, 
i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v 
v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv64i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv64i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv32i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv32i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv32i8( %val, 
%val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16( 
%val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i32(,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i32(,,,,,, i16*, , , i32) + +define void @test_vsuxseg6_nxv2i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg7.nxv2i16.nxv1i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32( %val, %val, 
%val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv32i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv32i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv64i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv64i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; 
CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv32i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv32i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: 
vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed 
$v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i32(,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i32(,,,,,,, i16*, , , i32) + +define void @test_vsuxseg7_nxv2i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i16(,,,,,,,, i16*, , , i32) + +define void 
@test_vsuxseg8_nxv2i16_nxv16i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv16i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg8.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: 
vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv32i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv32i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv32i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i16(,,,,,,,, i16*, , i32) +declare void 
@llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv8i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv8i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv8i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v 
v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv64i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv64i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv64i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv32i8(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv32i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv32i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8(,,,,,,,, i16*, , i32) 
+declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv16i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i32(,,,,,,,, i16*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i32(,,,,,,,, i16*, , , i32) + +define void @test_vsuxseg8_nxv2i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv4i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i16(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg2_nxv4i32_nxv1i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i8(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv1i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i8(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg2_nxv4i32_nxv16i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i8(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv16i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i8(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg2_nxv4i32_nxv2i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv2i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg2_nxv4i32_nxv32i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv32i16(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv32i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv32i16(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg2_nxv4i32_nxv1i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv1i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg2_nxv4i32_nxv8i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i16(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv8i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i16(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg2_nxv4i32_nxv8i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i8(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv8i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i8(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg2_nxv4i32_nxv8i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv8i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv64i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv64i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv4i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv64i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv64i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv4i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv32i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv32i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv4i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv32i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv32i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i8(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i8(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv4i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i8( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv4i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i16(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i16(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv4i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i16( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32(,, i32*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i32) + +define void @test_vsuxseg2_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: 
vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv32i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv32i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv32i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv32i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, 
e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv64i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv64i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv64i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv64i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i16( %val, %val, %val, i32* 
%base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv32i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv32i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv32i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv32i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv32i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv32i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i8(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i8(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i8( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv16i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare 
void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i16(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i16(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i16( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32(,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i32) + +define void @test_vsuxseg3_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv16i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv16i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i8(,,,, i32*, 
, i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv1i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv16i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv16i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + 
ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv32i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv32i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv32i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv32i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv1i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; 
CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv8i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv8i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv8i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv8i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv8i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv8i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 
+; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv64i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv64i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv64i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv64i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv1i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) { 
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i32_nxv32i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv32i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i32_nxv32i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv32i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i32_nxv2i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v8
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vmv2r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv4i32_nxv2i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg4_nxv4i32_nxv16i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base,
%index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv16i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i16(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i16(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32(,,,, i32*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i32) + +define void @test_vsuxseg4_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, 
e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv2i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv32i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv32i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv32i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv32i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv1i32( %val, half* 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg2.nxv16f16.nxv8i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv64i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv64i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv64i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv64i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i8( 
%val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv32i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv32i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv32i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv32i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; 
CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv16f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv16f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed 
$v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv16i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv16i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv1i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv16i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg2_mask_nxv4f64_nxv16i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i32(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i32(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv2i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i32( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv4i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv4i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv32i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv32i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv32i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 
+; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv32i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv32i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv32i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i32(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i32(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv1i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i32( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv8i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv8i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv8i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 
+; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i32(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i32(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv8i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i32( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i32( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv64i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv64i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv64i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv64i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv64i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv4i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv1i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv32i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv32i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv32i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv32i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv32i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i8(,, double*, , i32) +declare void 
@llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv2i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i32(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i32(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv16i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i32( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i32( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv2i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl) + 
ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i32) + +define void @test_vsuxseg2_nxv4f64_nxv4i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_nxv4i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv16i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8( %val, 
%val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv16i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i32(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i32(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv2i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i32( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv4i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv32i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv32i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv32i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv32i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv32i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv8i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i16( %val, %val, double* 
%base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv8i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i32(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i32(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv8i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i32( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i32( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv64i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv64i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv64i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv64i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv64i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg2.nxv1f64.nxv4i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv4i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv32i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv32i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv32i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv32i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv32i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare 
void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i8(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i8(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv2i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i8( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i32(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i32(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv16i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i32( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i32( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i16(,, double*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i16(,, double*, , , i32) + +define void @test_vsuxseg2_nxv1f64_nxv2i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i16( %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl) + ret void 
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, i32)
+declare void
@llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i8(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv8i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i32(,,, double*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i32(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv8i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i32( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv64i8(,,, double*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv64i8(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv64i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv64i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv64i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i8(,,, double*, , 
i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i8(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv4i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16(,,, double*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv32i8(,,, double*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv32i8(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv32i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv32i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv32i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i8(,,, double*, , i32) +declare void 
@llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i8(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv2i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i32(,,, double*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i32(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv16i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i32( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i16(,,, double*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i16(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv2i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i16( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i32(,,, double*, , i32) +declare void 
@llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i32(,,, double*, , , i32) + +define void @test_vsuxseg3_nxv1f64_nxv4i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i32( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i16(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i16(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv16i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* 
%base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv16i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i32(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i32(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv2i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i16(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i16(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv4i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v 
v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv32i16(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv32i16(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv32i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv32i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i16(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i16(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv8i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv8i16: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv8i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i32(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i32(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv8i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv64i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv64i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv64i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv64i8( 
%val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv4i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv32i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv32i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv32i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; 
CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv32i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv2i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i32(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i32(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv16i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i16(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i16(,,,, double*, , , i32) + +define void 
@test_vsuxseg4_nxv1f64_nxv2i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i32(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i32(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv1f64_nxv4i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i16(,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i16(,,,,, double*, , , i32) + +define void @test_vsuxseg5_nxv1f64_nxv16i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, 
double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8(,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i32) + +define void @test_vsuxseg5_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i8(,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i8(,,,,, double*, , , i32) + +define void @test_vsuxseg5_nxv1f64_nxv16i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i32(,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i32(,,,,, double*, , , i32) + +define void @test_vsuxseg5_nxv1f64_nxv2i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i16(,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i16(,,,,, double*, , , i32) + +define void @test_vsuxseg5_nxv1f64_nxv4i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv32i16(,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv32i16(,,,,, double*, , , i32) + +define void @test_vsuxseg5_nxv1f64_nxv32i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32(,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i32) + +define void @test_vsuxseg5_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i16(,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i16(,,,,, double*, , , i32) + +define void @test_vsuxseg5_nxv1f64_nxv8i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i8(,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i8(,,,,, double*, , , i32) + +define void @test_vsuxseg5_nxv1f64_nxv8i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i32(,,,,, double*, , i32) +declare void 
@llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i32(,,,,, double*, , , i32)
+
+define void @test_vsuxseg5_nxv1f64_nxv8i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv64i8(,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv64i8(,,,,, double*, , , i32)
+
+define void @test_vsuxseg5_nxv1f64_nxv64i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i8(,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i8(,,,,, double*, , , i32)
+
+define void @test_vsuxseg5_nxv1f64_nxv4i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16(,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i32)
+
+define void @test_vsuxseg5_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv32i8(,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv32i8(,,,,, double*, , , i32)
+
+define void @test_vsuxseg5_nxv1f64_nxv32i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i8(,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i8(,,,,, double*, , , i32)
+
+define void @test_vsuxseg5_nxv1f64_nxv2i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i32(,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i32(,,,,, double*, , , i32)
+
+define void @test_vsuxseg5_nxv1f64_nxv16i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i16(,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i16(,,,,, double*, , , i32)
+
+define void @test_vsuxseg5_nxv1f64_nxv2i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i32(,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i32(,,,,, double*, , , i32)
+
+define void @test_vsuxseg5_nxv1f64_nxv4i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i16(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i16(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv16i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i8(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i8(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv16i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i32(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i32(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv2i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i16(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i16(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv4i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv32i16(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv32i16(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv32i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i16(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i16(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv8i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i8(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i8(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv8i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i32(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i32(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv8i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv64i8(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv64i8(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv64i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i8(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i8(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv4i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv32i8(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv32i8(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv32i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i8(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i8(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv2i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i32(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i32(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv16i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i16(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i16(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv2i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i32(,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i32(,,,,,, double*, , , i32)
+
+define void @test_vsuxseg6_nxv1f64_nxv4i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i16(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i16(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv16i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i8(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i8(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv16i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i32(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i32(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv2i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i16(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i16(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv4i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv32i16(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv32i16(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv32i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i16(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i16(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv8i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i8(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i8(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv8i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i32(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i32(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv8i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv64i8(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv64i8(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv64i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i8(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i8(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv4i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv32i8(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv32i8(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv32i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i8(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i8(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv2i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i32(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i32(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv16i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i16(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i16(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv2i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i32(,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i32(,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg7_nxv1f64_nxv4i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i16(,,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i16(,,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg8_nxv1f64_nxv16i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8(,,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg8_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i8(,,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i8(,,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg8_nxv1f64_nxv16i8( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i32(,,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i32(,,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg8_nxv1f64_nxv2i32( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i16(,,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i16(,,,,,,,, double*, , , i32)
+
+define void @test_vsuxseg8_nxv1f64_nxv4i16( %val, double* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv32i16(,,,,,,,, double*, , i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv32i16(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv32i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i16(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i16(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv8i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v 
v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i8(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i8(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv8i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i32(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i32(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv8i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg8_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv64i8(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv64i8(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv64i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i8(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i8(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv4i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv32i8(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv32i8(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv32i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i8(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i8(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv2i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i32(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i32(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv16i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i16(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i16(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv2i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg8.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i32(,,,,,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i32(,,,,,,,, double*, , , i32) + +define void @test_vsuxseg8_nxv1f64_nxv4i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i16(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i8(,, float*, 
, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i16(,, float*, , i32) 
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv32i16(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv32i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv32i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv32i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i16(,, float*, , 
i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv64i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv64i8(,, float*, , , i32) + +define void 
@test_vsuxseg2_nxv2f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv64i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv64i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i16(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv32i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv32i8(,, float*, , , i32) + +define void 
@test_vsuxseg2_nxv2f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv32i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv32i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg2_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv2f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i16(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i16(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv2f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i16( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i8(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i8(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv2f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg3_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i8( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i8(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i8(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv2f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i8( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i16(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i16(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv2f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v 
v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i16( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv32i16(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv32i16(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv2f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv32i16( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv32i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i32(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i32(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv2f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i32( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i16(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i16(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv2f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i16( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i8(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i8(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv8i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i8( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i32(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i32(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv8i32( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i32( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv64i8(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv64i8(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv64i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv64i8( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv64i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i8(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i8(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv4i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i8( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i16(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i16(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i16( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv32i8(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv32i8(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv32i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv32i8( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv32i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i32(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i32(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv16i32( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i32( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i32(,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i32(,,, float*, , , i32)
+
+define void @test_vsuxseg3_nxv2f32_nxv4i32( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i32( %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i16(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i16(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv16i16( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i8(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i8(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv1i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i8(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i8(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv16i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i16(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i16(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv4i16( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv32i16(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv32i16(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv32i16( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i32(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i32(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i16(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i16(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv8i16( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i8(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i8(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv8i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i32(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i32(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv8i32( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv64i8(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv64i8(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv64i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i8(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i8(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv4i8( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i16(,,,, float*, , i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i16(,,,, float*, , , i32)
+
+define void @test_vsuxseg4_nxv2f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+;
CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv32i8(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv32i8(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv2f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i32(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i32(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv2f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i32(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i32(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv2f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i16(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i16(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg5.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i8(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i8(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i8(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i8(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg5_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i16(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i16(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv32i16(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv32i16(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t 
+; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i32(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i32(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i16(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i16(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i8(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i8(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv8i8( %val, float* %base, %index, 
%mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i32(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i32(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv64i8(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv64i8(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i8(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i8(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i16(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i16(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv32i8(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv32i8(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg5.nxv2f32.nxv2i8(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i32(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i32(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i32(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i32(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv2f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 
+; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv32i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv32i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 
+; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg6.nxv2f32.nxv8i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv64i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv64i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void 
+} + +define void @test_vsuxseg6_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv32i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv32i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i32) + +define void 
@test_vsuxseg6_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg6_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv2f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv1i8( %val, 
float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) 
{ +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv32i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv32i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + 
ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv64i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv64i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; 
CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv32i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv32i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v 
v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv2f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i16(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv16i16( %val, float* 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i8( %val, 
%val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i16(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv32i16(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv32i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i16(,,,,,,,, float*, , i32) +declare void 
@llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; 
CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv64i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv64i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i16(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv32i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv32i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare 
void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv2f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; 
CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv32i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv32i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv32i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv32i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; 
CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv64i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv64i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv64i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv64i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32) + +define void @test_vsuxseg2_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv32i8(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32) + +define void @test_vsuxseg2_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i8(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32) + +define void @test_vsuxseg2_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i32(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) + ret void +} + 
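+; Note on the pattern checked throughout these tests: the vsuxsegN
+; instructions consume their N segment operands as a group of consecutive
+; vector registers (e.g. v0_v1..., or v1_v2... in the masked variants,
+; where v0 carries the mask). Since every segment passed here is the same
+; %val, the compiler first materializes that register tuple with
+; whole-register vmv1r.v/vmv4r.v copies and only then issues the single
+; indexed segment store. The "# kill:" lines are register-allocator
+; annotations marking the redefinition of a register as part of the tuple;
+; they are printed as comments and emit no code.
+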
+declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32) + +define void @test_vsuxseg2_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32) + +define void @test_vsuxseg2_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i32(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32) + +define void @test_vsuxseg3_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) + ret void +} + +declare 
void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i16(,,, half*, 
, , i32) + +define void @test_vsuxseg3_nxv1f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv32i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv32i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv32i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv32i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i16(,,, half*, , , i32) + +define void 
@test_vsuxseg3_nxv1f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv64i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv64i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv64i8( %val, half* %base, %index, i32 
%vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv64i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv64i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv32i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv32i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv32i8: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv32i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv32i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv2i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv1f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv1f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv1f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv1f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i16(,,,, half*, , , i32) + 
+define void @test_vsuxseg4_nxv1f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv32i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv32i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv1f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, 
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
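+
+; NOTE (editorial, not autogenerated): two register-allocation patterns appear in
+; the expected code. With a small index operand the segment tuple is copied into
+; a fresh run of vmv1r.v destinations (v0-v3 unmasked, v1-v4 masked); when the
+; index needs a large register group (nxv8i32 or nxv64i8 above), the tuple stays
+; in v8_v9_v10_v11 (the '# kill' annotation) and the index lives in v12 or v16.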
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+;
CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i32(,,,,, half*, , , i32) + +define void 
@test_vsuxseg5_nxv1f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv32i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv32i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg5_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv64i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv64i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv32i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv32i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret 
void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv1f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; 
CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i32( %val, 
%val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv32i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv32i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i32(,,,,,, half*, , i32) +declare 
void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv64i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv64i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv4i8( %val, half* %base, 
%index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv32i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv32i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg6_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv1f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; 
CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv32i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv32i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32(,,,,,,, 
half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg7_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv64i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv64i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv64i8( %val, %val, %val, %val, 
%val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv32i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv32i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), 
v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv1f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, 
v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, 
%val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv32i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv32i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg8_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv64i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv64i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv32i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv32i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg8_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv1f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: 
+  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg8_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT:    vmv1r.v v25, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i32)
<vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void
@llvm.riscv.vsuxseg2.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i32)
+declare void
@llvm.riscv.vsuxseg2.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i32)
+declare void
@llvm.riscv.vsuxseg2.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
+
+define void
@test_vsuxseg2_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vmv1r.v v25, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg2_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define
void @test_vsuxseg3_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL:
test_vsuxseg3_nxv1f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #
kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+;
CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT:
vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg3_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: #
%entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
+
+define void
@test_vsuxseg4_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base,
<vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1,
e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL:
test_vsuxseg4_mask_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void
@llvm.riscv.vsuxseg4.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg4_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v
v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i8(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i8(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv1f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg5.nxv1f32.nxv2i32(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i32(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv1f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i16(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i16(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv1f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv32i16(,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv32i16(,,,,, float*, , , i32) + +define void @test_vsuxseg5_nxv1f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed 
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg5_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg6_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define
void @test_vsuxseg6_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v 
v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv32i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv32i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: 
vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv64i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv64i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; 
CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv32i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv32i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg6.mask.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i8(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i8(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i16(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i16(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i32(,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i32(,,,,,, float*, , , i32) + +define void @test_vsuxseg6_nxv1f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i16( %val, %val, %val, %val, 
%val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv32i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv32i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 
+; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; 
CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv64i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv64i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv32i8(,,,,,,, float*, , i32) +declare void 
@llvm.riscv.vsuxseg7.mask.nxv1f32.nxv32i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i8(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i8(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i32( %val, 
%val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i16(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i16(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i32(,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i32(,,,,,,, float*, , , i32) + +define void @test_vsuxseg7_nxv1f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i16(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: 
vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i16(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv4i16( 
%val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv32i16(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv32i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, 
a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i16(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg8_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv64i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv64i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret 
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i8(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg8_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsuxseg8_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv32i8(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i8(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i8(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i16(,,,,,,,, 
float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i16(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i32(,,,,,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i32(,,,,,,,, float*, , , i32) + +define void @test_vsuxseg8_nxv1f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg2.nxv8f16.nxv16i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli 
a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv32i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv32i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv32i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv32i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: 
vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg2_nxv8f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f16_nxv64i8(<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv64i8(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f16_nxv64i8(<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv64i8(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f16_nxv4i8(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i8(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f16_nxv4i8(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i8(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f16_nxv1i16(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f16_nxv1i16(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f16_nxv32i8(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv32i8(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f16_nxv32i8(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv32i8(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f16_nxv2i8(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i8(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f16_nxv2i8(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i8(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f16_nxv16i32(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i32(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f16_nxv16i32(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i32(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f16_nxv2i16(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f16_nxv2i16(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv1r.v v25, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f16_nxv4i32(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v26, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i32(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f16_nxv4i32(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT:    vmv2r.v v26, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i32(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
+
+define void 
@test_vsuxseg3_nxv8f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv2i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv32i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv32i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv32i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv32i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv64i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv64i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv64i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv64i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, 
(a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv32i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv32i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv32i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv32i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg3.nxv8f16.nxv16i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv8f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv8f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg4.nxv8f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv32i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv32i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i32) + 
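+; For the nxv8i32 index below, EEW=32 gives an m4 index register group next to the m2 store data, so the tuple of four m2 copies of %val is assembled in v0-v6 (v2-v8 in the masked form, where v0 carries the mask) while the index stays in v12.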
+define void @test_vsuxseg4_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv64i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv64i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i8( %val, %val, %val, %val, half* %base, 
%index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv32i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv32i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv8f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv8f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; 
CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i16(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg2_mask_nxv8f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i16(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv32i16(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv32i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv32i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv32i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv2r.v v26, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; 
CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv1r.v v25, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v28, v12 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv64i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv64i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv64i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv64i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv8f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 
+
+declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f32_nxv4i8(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f32_nxv4i8(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f32_nxv1i16(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f32_nxv1i16(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv32i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv32i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f32_nxv32i8(<vscale x 8 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv32i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 32 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f32_nxv32i8(<vscale x 8 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v28, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v28, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv32i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f32_nxv2i8(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f32_nxv2i8(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f32_nxv16i32(<vscale x 8 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 16 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f32_nxv16i32(<vscale x 8 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f32_nxv2i16(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f32_nxv2i16(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv1r.v v25, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
+
+define void @test_vsuxseg2_nxv8f32_nxv4i32(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv8f32_nxv4i32(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv2r.v v26, v12
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg2_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg2_mask_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
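+
+; Reviewer note (not autogenerated): in the vsuxseg3 tests that follow, the
+; single %val operand is materialized as a tuple of three consecutive LMUL=2
+; register groups, visible in the CHECK lines as copies into v0/v2/v4 when
+; unmasked and into v2/v4/v6 when masked, since v0 then carries the mask.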
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg3_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg3_mask_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
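+
+; Reviewer note (not autogenerated): the vsuxseg4 tests below extend the same
+; pattern to four LMUL=2 groups (v0/v2/v4/v6 unmasked, v2/v4/v6/v8 masked);
+; for LMUL=8 indexes the tuple instead stays in v8-v14 and the index uses v16.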
+
+declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg4_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v0, v8
+; CHECK-NEXT: vmv2r.v v2, v0
+; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl)
+ ret void
+}
+
+define void @test_vsuxseg4_mask_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+ ret void
+}
CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv8i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv8i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i32(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i32(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv8i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv64i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv64i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv64i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv64i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv64i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv4i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i16(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i16(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv1i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg4.nxv2f64.nxv32i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv32i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv32i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv32i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv32i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i32(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i32(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv16i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu 
+; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i32(,,,, double*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i32(,,,, double*, , , i32) + +define void @test_vsuxseg4_nxv2f64_nxv4i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_nxv4i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v 
v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli 
a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv32i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv32i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv32i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv32i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv64i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv64i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv64i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv64i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv32i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv32i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv32i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv32i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + 
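+; A note on the codegen pattern these checks exercise: each test passes the
+; same %val for every segment field, so the register allocator must
+; materialize the operand tuple with whole-register moves (vmv1r.v/vmv2r.v/
+; vmv4r.v) into consecutive vector registers before issuing the store. The
+; ei8/ei16/ei32 suffix on vsuxseg<nf>ei<eew>.v reflects the element width of
+; the index vector, and the masked variants take the mask in v0 (the trailing
+; v0.t operand), which is why the masked checks never place segment data in v0.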
+declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg3.nxv4f16.nxv1i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i32) + 
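+; The operand types of these intrinsics follow from their names:
+; @llvm.riscv.vsuxseg<nf>.<data>.<idx> takes <nf> segment operands of the
+; scalable vector type named by <data> (e.g. nxv4f16 = <vscale x 4 x half>),
+; a scalar base pointer, an index vector of the type named by <idx>, and the
+; i32 VL; the .mask variants additionally take a mask with one bit per data
+; element (<vscale x 4 x i1> for this group) ahead of the VL operand.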
+define void @test_vsuxseg3_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv32i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv32i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv32i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv32i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv8i16( %val, half* 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv64i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv64i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv64i8: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv64i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv64i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv32i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv32i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def 
$v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv32i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv32i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, 
v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 
+; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv32i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv32i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i16(,,,, 
half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv64i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv64i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv32i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv32i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv16i32( %val, half* %base, 
%index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg5.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg5_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv32i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv32i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv64i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv64i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv32i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv32i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i8(,,,,, half*, , i32) +declare void 
@llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, 
(a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg6.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg6_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg6_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv32i16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32) + +define void @test_vsuxseg6_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i32(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl) + ret void +}
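+
+; NOTE: an explanatory comment on the register pattern exercised by the
+; vsuxseg6 tests above; this is an inferred reading of the generated code,
+; not part of the autogenerated CHECK output. vsuxseg6 (indexed-unordered
+; 6-field segment store) takes six vector value operands that must occupy
+; six consecutive vector registers. Because every field here is the same
+; %val, isel materializes the tuple with whole-register moves (vmv1r.v)
+; before the store:
+;   unmasked: tuple rooted at v0, copies fill v1..v5
+;             -> vsuxseg6ei16.v v0, (a0), v9
+;   masked:   v0 holds the mask, so the tuple is rooted at v1 and copies
+;             fill v2..v6
+;             -> vsuxseg6ei16.v v1, (a0), v9, v0.t
+; When the index operand needs a large register group (the nxv32i16 index
+; is an m8 group placed in v16), the tuple can stay rooted at the incoming
+; v8; the "# kill:" line marks v8 being redefined as the tuple root
+; $v8_v9_v10_v11_v12_v13 before v9..v13 are filled. In every variant,
+; "vsetvli a1, a1, e16,m1,ta,mu" sets VL from %vl (a1) with SEW=16 and
+; LMUL=1, matching the nxv4f16 element type of the stored segments.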
+ +define void @test_vsuxseg6_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv8i32( %val, 
half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv64i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv64i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv32i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv32i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv32i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv32i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv1i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v 
v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv64i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv64i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8(,,,,,,, half*, , i32) +declare void 
@llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv32i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv32i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret 
void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i32( 
%val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: 
vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: 
vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv32i16(,,,,,,,, half*, , i32) +declare void 
@llvm.riscv.vsuxseg8.mask.nxv4f16.nxv32i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv64i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv64i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, 
%val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv32i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv32i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; 
CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv2i16( %val, half* 
%base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv1i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv32i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv32i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv32i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv32i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv64i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv64i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv64i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv64i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv32i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv32i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; 
CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv32i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv32i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 +; CHECK-NEXT: vmv1r.v v25, v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i32(,, half*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i32(,, half*, , , i32) + +define void @test_vsuxseg2_nxv2f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i32( %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg3.nxv2f16.nxv1i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg3_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv32i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv32i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv32i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv32i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv8i16( %val, half* 
%base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv64i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv64i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv64i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv64i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv32i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv32i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv32i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv32i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v 
v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i32(,,, half*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i32(,,, half*, , , i32) + +define void @test_vsuxseg3_nxv2f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i32( %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define 
void @test_vsuxseg4_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv32i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv32i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: 
vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv64i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv64i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
# kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv32i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv32i8(,,,, half*, , , i32) + +define 
void @test_vsuxseg4_nxv2f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i32(,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i32(,,,, half*, , , i32) + +define void @test_vsuxseg4_nxv2f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, 
%val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv32i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv32i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i8( 
%val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv64i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv64i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv32i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv32i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli 
a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i32(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16(,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i32(,,,,, half*, , i32) 
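+; Note: every vsuxseg<N> intrinsic in this family takes N segment values of
+; the value type (<vscale x 2 x half> for the nxv2f16 tests), the base
+; pointer, an index vector whose element width selects the ei8/ei16/ei32
+; instruction form, and the i32 VL; the .mask variants take an extra
+; <vscale x 2 x i1> mask operand before the VL. Because the N segment
+; values must occupy consecutive vector registers, the expected code builds
+; the register tuple with vmv1r.v copies: unmasked stores may start the
+; tuple at v0, while masked stores start at v1 since v0 carries the mask
+; (v0.t). The vsetvli uses e16,mf2 to match the nxv2f16 value type; index
+; operands wider than one register are placed at v12 (LMUL=4) or v16
+; (LMUL=8) per the CHECK lines above.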
+declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i32(,,,,, half*, , , i32) + +define void @test_vsuxseg5_nxv2f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv32i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv32i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, 
a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv64i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv64i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* 
%base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv32i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv32i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i32(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16(,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i32(,,,,,, half*, , i32) +declare void 
@llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i32(,,,,,, half*, , , i32) + +define void @test_vsuxseg6_nxv2f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg7_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg7.nxv2f16.nxv4i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv32i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv32i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; 
CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, 
(a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv64i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv64i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i16(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv1i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv32i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv32i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i32) + +define void 
@test_vsuxseg7_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i32(,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i32(,,,,,,, half*, , , i32) + +define void @test_vsuxseg7_nxv2f16_nxv4i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv16i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define 
void @test_vsuxseg8_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv1i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv16i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: 
+ tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv4i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv32i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv32i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv32i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 
def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv1i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv8i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, 
%val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv8i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv8i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: 
vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv64i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv64i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv64i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv4i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv1i16( 
%val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv32i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv32i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv32i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8( %val, 
%val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i32(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i32(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv16i32( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16(,,,,,,,, half*, , i32) +declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i32) + +define void @test_vsuxseg8_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v8 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, 
v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, i32)
+declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
+
+define void @test_vsuxseg8_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv16i16(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv16i16(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv1i8(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv1i8(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv16i8(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv16i8(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv2i32(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv2i32(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv32i16(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv32i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv32i16(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv32i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv1i32(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv1i32(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv8i16(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv8i16(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v26, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv8i8(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv8i8(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv1r.v v25, v10
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
+
+define void @test_vsuxseg2_nxv4f32_nxv8i32(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv4f32_nxv8i32(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, i32)
+declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
+
+define void 
@test_vsuxseg2_nxv4f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv64i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv64i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i16(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv4f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv32i8(,, float*, , i32) +declare void 
@llvm.riscv.vsuxseg2.mask.nxv4f32.nxv32i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv4f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv32i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv32i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i8(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i8(,, float*, , , i32) + +define void @test_vsuxseg2_nxv4f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i8( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv4f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i16(,, float*, , i32) +declare void 
@llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i16(,, float*, , , i32) + +define void @test_vsuxseg2_nxv4f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i16( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv1r.v v25, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32(,, float*, , i32) +declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i32) + +define void @test_vsuxseg2_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32( %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i16(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i16(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i16( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + 
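; note: in this masked form the value tuple is copied into v2/v4/v6 (the unmasked test above uses v0/v2/v4), since v0 is taken by the mask for the v0.t operand +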
ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i8(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i8(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i8( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i8(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i8(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i8( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i32(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i32(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i32( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16(,,, float*, , i32) +declare void 
@llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv32i16(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv32i16(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv32i16( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv32i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i32(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i32(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i32( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i16(,,, float*, , i32) +declare void 
@llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i16(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i16( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i8(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i8(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i8( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i32(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i32(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i32( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv64i8(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv64i8(,,, float*, , , i32) + +define void 
@test_vsuxseg3_nxv4f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv64i8( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv64i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i16(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i16(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i16( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv32i8(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv32i8(,,, float*, , , i32) + +define void 
@test_vsuxseg3_nxv4f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv32i8( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv32i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i8(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i8(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i8( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i32(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i32(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i32( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i16(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i16(,,, float*, , , i32) + +define void 
@test_vsuxseg3_nxv4f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i16( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32(,,, float*, , i32) +declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i32) + +define void @test_vsuxseg3_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i16(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i16(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv16i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv16i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i8(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i8(,,,, float*, , , i32) + +define void 
@test_vsuxseg4_nxv4f32_nxv1i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i8(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i8(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv16i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv16i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i32(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i32(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv2i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void 
@llvm.riscv.vsuxseg4.nxv4f32.nxv4i16(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv32i16(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv32i16(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv32i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv32i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i32(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i32(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv1i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; 
CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i16(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i16(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv8i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv8i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i8(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i8(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv8i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv8i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i32(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i32(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv8i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv8i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v 
v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv64i8(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv64i8(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv64i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv64i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i16(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i16(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv1i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void 
@test_vsuxseg4_mask_nxv4f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv32i8(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv32i8(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv32i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv32i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i8(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i8(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv2i8( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i32(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i32(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv16i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv16i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 +; CHECK-NEXT: vmv2r.v v10, v8 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i16(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i16(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv2i16( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32(,,,, float*, , i32) +declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i32) + +define void @test_vsuxseg4_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v8 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v8 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + ret void +} +
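+; For reference, every declaration in this file follows the same shape: nf
+; segment values (2, 3, or 4 here), the base pointer, the index vector, and
+; the vl, with the ".mask" variants taking an extra mask operand before vl.
+; Spelled out with full types for the nxv4f32/nxv4i32 case -- a sketch
+; recovered from the intrinsic name mangling, in which nxv4f32 denotes
+; <vscale x 4 x float>, nxv4i32 denotes <vscale x 4 x i32>, and the mask type
+; <vscale x 4 x i1> matches the element count of the stored data:
+;
+;   declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x float>, float*, <vscale x 4 x i32>, i32)
+;   declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x float>, float*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)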