diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -104,6 +104,7 @@
 ; CHECK-NEXT:       RISCV Pre-RA pseudo instruction expansion pass
 ; CHECK-NEXT:       RISCV Merge Base Offset
 ; CHECK-NEXT:       RISCV Insert VSETVLI pass
+; CHECK-NEXT:       RISCV init undef pass
 ; CHECK-NEXT:       Detect Dead Lanes
 ; CHECK-NEXT:       Process Implicit Definitions
 ; CHECK-NEXT:       Remove unreachable machine basic blocks
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -25,26 +25,26 @@
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    li a0, 55
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vloxseg2ei32.v v8, (a0), v8
+; CHECK-NEXT:    vloxseg2ei32.v v16, (a0), v8
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 2
-; CHECK-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vs4r.v v16, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vs4r.v v12, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vs4r.v v20, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    li s0, 36
 ; CHECK-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT:    vfwadd.vv v16, v8, v8, v0.t
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT:    call func@plt
 ; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -101,23 +101,23 @@
 ; SUBREGLIVENESS-NEXT:    sub sp, sp, a0
 ; SUBREGLIVENESS-NEXT:    li a0, 55
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT:    vloxseg2ei32.v v8, (a0), v8
+; SUBREGLIVENESS-NEXT:    vloxseg2ei32.v v16, (a0), v8
 ; SUBREGLIVENESS-NEXT:    csrr a0, vlenb
 ; SUBREGLIVENESS-NEXT:    slli a0, a0, 3
 ; SUBREGLIVENESS-NEXT:    add a0, sp, a0
 ; SUBREGLIVENESS-NEXT:    addi a0, a0, 16
 ; SUBREGLIVENESS-NEXT:    csrr a1, vlenb
 ; SUBREGLIVENESS-NEXT:    slli a1, a1, 2
-; SUBREGLIVENESS-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT:    vs4r.v v16, (a0) # Unknown-size Folded Spill
 ; SUBREGLIVENESS-NEXT:    add a0, a0, a1
-; SUBREGLIVENESS-NEXT:    vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT:    vs4r.v v20, (a0) # Unknown-size Folded Spill
 ; SUBREGLIVENESS-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; SUBREGLIVENESS-NEXT:    vmclr.m v0
 ; SUBREGLIVENESS-NEXT:    li s0, 36
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT:    vfwadd.vv v8, v8, v8, v0.t
+; SUBREGLIVENESS-NEXT:    vfwadd.vv v16, v8, v8, v0.t
 ; SUBREGLIVENESS-NEXT:    addi a0, sp, 16
-; SUBREGLIVENESS-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
 ; SUBREGLIVENESS-NEXT:    call func@plt
 ; SUBREGLIVENESS-NEXT:    li a0, 32
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
@@ -0,0 +1,147 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv64 -mattr=+v -riscv-enable-subreg-liveness < %s | FileCheck %s
+
+define dso_local signext i32 @undef_early_clobber_chain() {
+; CHECK-LABEL: undef_early_clobber_chain:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -400
+; CHECK-NEXT:    .cfi_def_cfa_offset 400
+; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, ma
+; CHECK-NEXT:    vrgather.vi v9, v8, 0
+; CHECK-NEXT:    mv a0, sp
+; CHECK-NEXT:    vse32.v v9, (a0)
+; CHECK-NEXT:    li a0, 0
+; CHECK-NEXT:    addi sp, sp, 400
+; CHECK-NEXT:    ret
+entry:
+  %dst = alloca [100 x float], align 8
+  call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %dst) #4
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef, i64 0, i64 0)
+  call void @llvm.riscv.vse.nxv2f32.i64(<vscale x 2 x float> %0, ptr nonnull %dst, i64 0)
+  call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %dst) #4
+  ret i32 0
+}
+
+define internal void @SubRegLivenessUndefInPhi(i64 %cond) {
+; CHECK-LABEL: SubRegLivenessUndefInPhi:
+; CHECK:       # %bb.0: # %start
+; CHECK-NEXT:    blez a0, .LBB1_2
+; CHECK-NEXT:  # %bb.1: # %Cond1
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vadd.vi v10, v8, 1
+; CHECK-NEXT:    vadd.vi v12, v8, 3
+; CHECK-NEXT:    j .LBB1_3
+; CHECK-NEXT:  .LBB1_2: # %Cond2
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vid.v v12
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 3
+; CHECK-NEXT:    add a1, a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v12, a0
+; CHECK-NEXT:    vsetvli a2, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vadd.vi v11, v12, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v10, v11, a0
+; CHECK-NEXT:    vsetvli a2, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vadd.vi v13, v12, 3
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v12, v13, a0
+; CHECK-NEXT:  .LBB1_3: # %UseSR
+; CHECK-NEXT:    vl1r.v v14, (zero)
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v15, v14, v8
+; CHECK-NEXT:    vrgatherei16.vv v8, v14, v10
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vv v8, v15, v8
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v9, v14, v12
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vv v8, v8, v9
+; CHECK-NEXT:    vs1r.v v8, (zero)
+; CHECK-NEXT:    ret
+start:
+  %0 = icmp sgt i64 %cond, 0
+  br i1 %0, label %Cond1, label %Cond2
+
+Cond1:                                            ; preds = %start
+  %v15 = tail call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+  %v17 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15, i64 0)
+  %vs12.i.i.i = add <vscale x 1 x i16> %v15, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
+  %v18 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i, i64 0)
+  %vs16.i.i.i = add <vscale x 1 x i16> %v15, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 3, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
+  %v20 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs16.i.i.i, i64 0)
+  br label %UseSR
+
+Cond2:                                            ; preds = %start
+  %v15.2 = tail call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+  %v17.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15.2, i64 1)
+  %vs12.i.i.i.2 = add <vscale x 1 x i16> %v15.2, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
+  %v18.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i.2, i64 1)
+  %vs16.i.i.i.2 = add <vscale x 1 x i16> %v15.2, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 3, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
+  %v20.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs16.i.i.i.2, i64 1)
+  br label %UseSR
+
+UseSR:                                            ; preds = %Cond1, %Cond2
+  %v17.3 = phi <vscale x 8 x i16> [ %v17, %Cond1 ], [ %v17.2, %Cond2 ]
+  %v18.3 = phi <vscale x 8 x i16> [ %v18, %Cond1 ], [ %v18.2, %Cond2 ]
+  %v20.3 = phi <vscale x 8 x i16> [ %v20, %Cond1 ], [ %v20.2, %Cond2 ]
+  %v37 = load <vscale x 8 x i8>, ptr addrspace(1) null, align 8
+  %v38 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v17.3, i64 4)
+  %v40 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v18.3, i64 4)
+  %v42 = and <vscale x 8 x i8> %v38, %v40
+  %v46 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v20.3, i64 4)
+  %v60 = and <vscale x 8 x i8> %v42, %v46
+  store <vscale x 8 x i8> %v60, ptr addrspace(1) null, align 4
+  ret void
+}
+
+define internal void @SubRegLivenessUndef() {
+; CHECK-LABEL: SubRegLivenessUndef:
+; CHECK:       # %bb.0: # %loopIR.preheader.i.i
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vadd.vi v10, v8, 1
+; CHECK-NEXT:    vadd.vi v12, v8, 3
+; CHECK-NEXT:  .LBB2_1: # %loopIR3.i.i
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl1r.v v14, (zero)
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v15, v14, v8
+; CHECK-NEXT:    vrgatherei16.vv v16, v14, v10
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vv v15, v15, v16
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v16, v14, v12
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vv v14, v15, v16
+; CHECK-NEXT:    vs1r.v v14, (zero)
+; CHECK-NEXT:    j .LBB2_1
+loopIR.preheader.i.i:
+  %v15 = tail call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+  %v17 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15, i64 0)
+  %vs12.i.i.i = add <vscale x 1 x i16> %v15, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
+  %v18 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i, i64 0)
+  %vs16.i.i.i = add <vscale x 1 x i16> %v15, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 3, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
+  %v20 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs16.i.i.i, i64 0)
+  br label %loopIR3.i.i
+
+loopIR3.i.i:                                      ; preds = %loopIR3.i.i, %loopIR.preheader.i.i
+  %v37 = load <vscale x 8 x i8>, ptr addrspace(1) null, align 8
+  %v38 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v17, i64 4)
+  %v40 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v18, i64 4)
+  %v42 = and <vscale x 8 x i8> %v38, %v40
+  %v46 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v20, i64 4)
+  %v60 = and <vscale x 8 x i8> %v42, %v46
+  store <vscale x 8 x i8> %v60, ptr addrspace(1) null, align 4
+  br label %loopIR3.i.i
+}
+
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float>, <vscale x 2 x float>, i64, i64) #2
+declare void @llvm.riscv.vse.nxv2f32.i64(<vscale x 2 x float>, ptr nocapture, i64)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16>, <vscale x 1 x i16>, i64 immarg)
+declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i16>, i64)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
@@ -44,10 +44,10 @@
 ; SUBREG-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SUBREG-NEXT:    vl1r.v v9, (zero)
 ; SUBREG-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
-; SUBREG-NEXT:    vmv1r.v v11, v12
-; SUBREG-NEXT:    vrgatherei16.vv v11, v9, v10
+; SUBREG-NEXT:    vmv1r.v v13, v12
+; SUBREG-NEXT:    vrgatherei16.vv v13, v9, v10
 ; SUBREG-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; SUBREG-NEXT:    vand.vv v9, v8, v11
+; SUBREG-NEXT:    vand.vv v9, v8, v13
 ; SUBREG-NEXT:    vs1r.v v9, (zero)
 ; SUBREG-NEXT:    j .LBB0_1
 loopIR.preheader.i.i: