diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -201,7 +201,9 @@
     Operands.push_back(Node->getOperand(CurOp++)); // Stride.
   if (IsMasked)
     Operands.push_back(Node->getOperand(CurOp++)); // Mask.
-  Operands.push_back(Node->getOperand(CurOp++)); // VL.
+  SDValue VL;
+  selectVLOp(Node->getOperand(CurOp++), VL);
+  Operands.push_back(VL);
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCV::VLSEGPseudo *P =
@@ -240,7 +242,9 @@
   Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
   if (IsMasked)
     Operands.push_back(Node->getOperand(CurOp++)); // Mask.
-  Operands.push_back(Node->getOperand(CurOp++)); // VL.
+  SDValue VL;
+  selectVLOp(Node->getOperand(CurOp++), VL);
+  Operands.push_back(VL);
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCV::VLSEGPseudo *P =
@@ -285,7 +289,9 @@
   MVT IndexVT = Operands.back()->getSimpleValueType(0);
   if (IsMasked)
     Operands.push_back(Node->getOperand(CurOp++)); // Mask.
-  Operands.push_back(Node->getOperand(CurOp++)); // VL.
+  SDValue VL;
+  selectVLOp(Node->getOperand(CurOp++), VL);
+  Operands.push_back(VL);
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
 
@@ -329,7 +335,9 @@
     Operands.push_back(Node->getOperand(CurOp++)); // Stride.
   if (IsMasked)
     Operands.push_back(Node->getOperand(CurOp++)); // Mask.
-  Operands.push_back(Node->getOperand(CurOp++)); // VL.
+  SDValue VL;
+  selectVLOp(Node->getOperand(CurOp++), VL);
+  Operands.push_back(VL);
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
@@ -360,7 +368,9 @@
   MVT IndexVT = Operands.back()->getSimpleValueType(0);
   if (IsMasked)
     Operands.push_back(Node->getOperand(CurOp++)); // Mask.
-  Operands.push_back(Node->getOperand(CurOp++)); // VL.
+  SDValue VL;
+  selectVLOp(Node->getOperand(CurOp++), VL);
+  Operands.push_back(VL);
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
@@ -0,0 +1,261 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+; Make sure we don't select a 0 vl to X0 in the custom isel handlers we use
+; for these intrinsics.
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* , i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(i16* %base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlseg2e16.v v4, (a0)
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlseg2e16.v v4, (a0), v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 0)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i1> %mask, i64 0)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16*, i64, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i64, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(i16* %base, i64 %offset, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 0)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 0)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 0)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 0)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 0)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 0)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlseg2ff_nxv16i16(i16* %base, i64* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a1)
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 0)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 1
+  %2 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
+  store i64 %2, i64* %outvl
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv4r.v v4, v8
+; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a1)
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 0)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 1
+  %2 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
+  store i64 %2, i64* %outvl
+  ret <vscale x 16 x i16> %1
+}
+
+declare void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i64)
+
+define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base) {
+; CHECK-LABEL: test_vsseg2_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, i64 0)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 0)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i64, <vscale x 16 x i1>, i64)
+
+define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %offset) {
+; CHECK-LABEL: test_vssseg2_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, i64 %offset, i64 0)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %offset, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 0)
+  ret void
+}
+
+declare void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, i64)
+declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
+
+define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index) {
+; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v28, v12
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v28
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 0)
+  ret void
+}
+
+define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v28, v12
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 0)
+  ret void
+}
+
+declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, i64)
+declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
+
+define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index) {
+; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v28, v12
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 0)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v28, v12
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 0)
+  ret void
+}
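
Note: each of the zvlsseg isel handlers changed above gets the same three-line fix. A minimal sketch of that pattern, assuming the surrounding RISCVDAGToDAGISel handler context (Node, CurOp, and Operands are the handler's locals, as in the hunks above):

  // Route the intrinsic's VL operand through selectVLOp instead of pushing the
  // raw SDValue. A constant VL of 0 is then materialized into a GPR (the
  // "mv aN, zero" in the checks above); selecting it straight to X0 would be
  // wrong, because X0 as the AVL register operand of vsetvli means VLMAX.
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL); // VL.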