diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -286,10 +286,6 @@ void RISCVPassConfig::addPostRegAlloc() {
   if (TM->getOptLevel() != CodeGenOpt::None &&
       EnableRedundantCopyElimination)
     addPass(createRISCVRedundantCopyEliminationPass());
-
-  // Temporarily disabled until post-RA pseudo expansion problem is fixed,
-  // see D123394 and D139169.
-  disablePass(&MachineLateInstrsCleanupID);
 }
 
 yaml::MachineFunctionInfo *
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -144,6 +144,7 @@
 ; CHECK-NEXT: Machine Optimization Remark Emitter
 ; CHECK-NEXT: Shrink Wrapping analysis
 ; CHECK-NEXT: Prologue/Epilogue Insertion & Frame Finalization
+; CHECK-NEXT: Machine Late Instructions Cleanup Pass
 ; CHECK-NEXT: Control Flow Optimizer
 ; CHECK-NEXT: Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT: Tail Duplication
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
--- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
@@ -826,7 +826,6 @@
 ; CHECK-RV32-NEXT: #NO_APP
 ; CHECK-RV32-NEXT: lui a0, 2
 ; CHECK-RV32-NEXT: sub sp, s0, a0
-; CHECK-RV32-NEXT: lui a0, 2
 ; CHECK-RV32-NEXT: addi a0, a0, -2032
 ; CHECK-RV32-NEXT: add sp, sp, a0
 ; CHECK-RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
@@ -1076,7 +1075,6 @@
 ; CHECK-RV64-NEXT: #NO_APP
 ; CHECK-RV64-NEXT: lui a0, 2
 ; CHECK-RV64-NEXT: sub sp, s0, a0
-; CHECK-RV64-NEXT: lui a0, 2
 ; CHECK-RV64-NEXT: addiw a0, a0, -2032
 ; CHECK-RV64-NEXT: add sp, sp, a0
 ; CHECK-RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
@@ -2323,7 +2321,6 @@
 ; CHECK-RV32-NEXT: #NO_APP
 ; CHECK-RV32-NEXT: lui a0, 2
 ; CHECK-RV32-NEXT: sub sp, s0, a0
-; CHECK-RV32-NEXT: lui a0, 2
 ; CHECK-RV32-NEXT: addi a0, a0, -2032
 ; CHECK-RV32-NEXT: add sp, sp, a0
 ; CHECK-RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
@@ -2561,7 +2558,6 @@
 ; CHECK-RV64-NEXT: #NO_APP
 ; CHECK-RV64-NEXT: lui a0, 2
 ; CHECK-RV64-NEXT: sub sp, s0, a0
-; CHECK-RV64-NEXT: lui a0, 2
 ; CHECK-RV64-NEXT: addiw a0, a0, -2032
 ; CHECK-RV64-NEXT: add sp, sp, a0
 ; CHECK-RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
--- a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
@@ -43,7 +43,6 @@
  ; CHECK-NEXT: call foo@plt
  ; CHECK-NEXT: lui a0, 2
  ; CHECK-NEXT: sub sp, s0, a0
- ; CHECK-NEXT: lui a0, 2
  ; CHECK-NEXT: addiw a0, a0, -2032
  ; CHECK-NEXT: add sp, sp, a0
  ; CHECK-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -1394,7 +1394,6 @@
 ; RV32-NEXT: addi a5, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a5) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v16, v16, v0
-; RV32-NEXT: addi a5, sp, 16
 ; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
 ; RV32-NEXT: vand.vx v0, v8, a2
 ; RV32-NEXT: vsll.vx v0, v0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -2943,7 +2943,6 @@
 ; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
 ; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
 ; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
 ; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -3036,7 +3035,6 @@
 ; RV32-NEXT: addi a6, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a6) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: addi a6, sp, 16
 ; RV32-NEXT: vs8r.v v24, (a6) # Unknown-size Folded Spill
 ; RV32-NEXT: vsrl.vx v0, v8, a3
 ; RV32-NEXT: vand.vx v0, v0, a2
@@ -3297,7 +3295,6 @@
 ; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
 ; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
 ; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
 ; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -3390,7 +3387,6 @@
 ; RV32-NEXT: addi a6, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a6) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: addi a6, sp, 16
 ; RV32-NEXT: vs8r.v v24, (a6) # Unknown-size Folded Spill
 ; RV32-NEXT: vsrl.vx v0, v8, a3
 ; RV32-NEXT: vand.vx v0, v0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
@@ -530,7 +530,6 @@
 ; RV32-NEXT: addi a4, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v16, v16, v0
-; RV32-NEXT: addi a4, sp, 16
 ; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
 ; RV32-NEXT: vand.vx v0, v8, a2
 ; RV32-NEXT: vsll.vx v0, v0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -1153,7 +1153,6 @@
 ; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
 ; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
 ; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
 ; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -1213,7 +1212,6 @@
 ; RV32-NEXT: addi a0, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: addi a0, sp, 16
 ; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT: vsrl.vx v0, v8, a3
 ; RV32-NEXT: vand.vx v0, v0, a2
@@ -1225,7 +1223,6 @@
 ; RV32-NEXT: vand.vx v8, v8, a4
 ; RV32-NEXT: vor.vv v8, v16, v8
 ; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a0, sp, 16
 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v8, v16, v8
 ; RV32-NEXT: csrr a0, vlenb
@@ -1393,7 +1390,6 @@
 ; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
 ; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
 ; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
 ; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -1453,7 +1449,6 @@
 ; RV32-NEXT: addi a0, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: addi a0, sp, 16
 ; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT: vsrl.vx v0, v8, a3
 ; RV32-NEXT: vand.vx v0, v0, a2
@@ -1465,7 +1460,6 @@
 ; RV32-NEXT: vand.vx v8, v8, a4
 ; RV32-NEXT: vor.vv v8, v16, v8
 ; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a0, sp, 16
 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v8, v16, v8
 ; RV32-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -765,10 +765,8 @@
 ; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB32_2
 ; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -2338,7 +2338,6 @@
 ; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
 ; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
 ; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
 ; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -2422,7 +2421,6 @@
 ; RV32-NEXT: addi a7, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a7) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v24, v24, v0
-; RV32-NEXT: addi a7, sp, 16
 ; RV32-NEXT: vs8r.v v24, (a7) # Unknown-size Folded Spill
 ; RV32-NEXT: vand.vx v0, v8, a3
 ; RV32-NEXT: vsll.vx v0, v0, a2
@@ -2706,7 +2704,6 @@
 ; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
 ; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
 ; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
 ; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -2790,7 +2787,6 @@
 ; RV32-NEXT: addi a7, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a7) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v24, v24, v0
-; RV32-NEXT: addi a7, sp, 16
 ; RV32-NEXT: vs8r.v v24, (a7) # Unknown-size Folded Spill
 ; RV32-NEXT: vand.vx v0, v8, a3
 ; RV32-NEXT: vsll.vx v0, v0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
@@ -1044,7 +1044,6 @@
 ; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
 ; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
 ; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
 ; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -1107,7 +1106,6 @@
 ; RV32-NEXT: addi a4, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v24, v24, v0
-; RV32-NEXT: addi a4, sp, 16
 ; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
 ; RV32-NEXT: vand.vx v0, v8, a3
 ; RV32-NEXT: vsll.vx v0, v0, a2
@@ -1314,7 +1312,6 @@
 ; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
 ; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
 ; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
 ; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -1377,7 +1374,6 @@
 ; RV32-NEXT: addi a4, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload
 ; RV32-NEXT: vor.vv v24, v24, v0
-; RV32-NEXT: addi a4, sp, 16
 ; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
 ; RV32-NEXT: vand.vx v0, v8, a3
 ; RV32-NEXT: vsll.vx v0, v0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -712,7 +712,6 @@
 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -766,7 +765,6 @@
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -712,7 +712,6 @@
 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
@@ -766,7 +765,6 @@
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -638,7 +638,6 @@
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: addi a1, a0, -16
@@ -697,7 +696,6 @@
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -712,7 +712,6 @@
 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 4
@@ -766,7 +765,6 @@
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -712,7 +712,6 @@
 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 0
@@ -766,7 +765,6 @@
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -712,7 +712,6 @@
 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 1
@@ -766,7 +765,6 @@
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
@@ -206,7 +206,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vfwadd.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfwadd.vv v16, v24, v0
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
@@ -206,7 +206,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vfwmul.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfwmul.vv v16, v24, v0
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
@@ -206,7 +206,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vfwsub.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfwsub.vv v16, v24, v0
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -2405,7 +2405,6 @@
 ; RV64-NEXT: vsext.vf2 v24, v8
 ; RV64-NEXT: vsll.vi v16, v0, 3
 ; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: addi a2, sp, 16
 ; RV64-NEXT: vl1r.v v24, (a2) # Unknown-size Folded Reload
 ; RV64-NEXT: addi a2, a1, -16
 ; RV64-NEXT: sltu a3, a1, a2
@@ -2478,7 +2477,6 @@
 ; RV64-NEXT: vzext.vf2 v24, v8
 ; RV64-NEXT: vsll.vi v16, v0, 3
 ; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: addi a2, sp, 16
 ; RV64-NEXT: vl1r.v v24, (a2) # Unknown-size Folded Reload
 ; RV64-NEXT: addi a2, a1, -16
 ; RV64-NEXT: sltu a3, a1, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -333,7 +333,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwadd.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vwadd.vv v16, v24, v0
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -333,7 +333,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwaddu.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vwaddu.vv v16, v24, v0
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -363,7 +363,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwmul.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vwmul.vv v16, v24, v0
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -355,7 +355,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwmulsu.vv v8, v24, v16
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vwmulsu.vv v16, v0, v24
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -339,7 +339,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwmulu.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vwmulu.vv v16, v24, v0
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
@@ -333,7 +333,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwsub.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vwsub.vv v16, v24, v0
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -333,7 +333,6 @@
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwsubu.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vwsubu.vv v16, v24, v0
 ; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -765,10 +765,8 @@
 ; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB32_2
 ; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -225,7 +225,6 @@
 ; CHECK-NEXT: vand.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: vand.vi v16, v24, 7, v0.t
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vsrl.vv v16, v24, v16, v0.t
 ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
@@ -257,7 +256,6 @@
 ; CHECK-NEXT: vand.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: vand.vi v16, v24, 7, v0.t
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vsll.vv v16, v24, v16, v0.t
 ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
@@ -459,7 +457,6 @@
 ; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
 ; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: vand.vi v16, v24, 15, v0.t
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vsrl.vv v16, v24, v16, v0.t
 ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
@@ -491,7 +488,6 @@
 ; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
 ; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: vand.vi v16, v24, 15, v0.t
-; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vsll.vv v16, v24, v16, v0.t
 ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
@@ -1082,7 +1078,6 @@
 ; CHECK-NEXT: .LBB46_2:
 ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: li a2, 48
 ; CHECK-NEXT: mul a1, a1, a2
 ; CHECK-NEXT: add a1, sp, a1
@@ -1269,7 +1264,6 @@
 ; CHECK-NEXT: .LBB47_2:
 ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: li a2, 48
 ; CHECK-NEXT: mul a1, a1, a2
 ; CHECK-NEXT: add a1, sp, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -699,7 +699,6 @@
 ; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB32_2
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -765,10 +765,8 @@
 ; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB32_2
 ; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -765,10 +765,8 @@
 ; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB32_2
 ; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -765,10 +765,8 @@
 ; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB32_2
 ; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll
@@ -33,7 +33,6 @@
 ; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
 ; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: csrr a0, vlenb
 ; SPILL-O2-NEXT: slli a0, a0, 1
@@ -76,7 +75,6 @@
 ; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
 ; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: csrr a0, vlenb
 ; SPILL-O2-NEXT: slli a0, a0, 1
@@ -119,7 +117,6 @@
 ; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
 ; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: csrr a0, vlenb
 ; SPILL-O2-NEXT: slli a0, a0, 1
@@ -162,7 +159,6 @@
 ; SPILL-O2-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
 ; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: csrr a0, vlenb
 ; SPILL-O2-NEXT: slli a0, a0, 2
@@ -205,7 +201,6 @@
 ; SPILL-O2-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
 ; SPILL-O2-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: csrr a0, vlenb
 ; SPILL-O2-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
@@ -42,7 +42,6 @@
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
 ; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: csrr a1, vlenb
 ; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: add a0, a0, a1
 ; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
@@ -98,7 +97,6 @@
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
 ; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: csrr a1, vlenb
 ; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: add a0, a0, a1
 ; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll
@@ -33,7 +33,6 @@
 ; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
 ; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: csrr a0, vlenb
 ; SPILL-O2-NEXT: slli a0, a0, 1
@@ -76,7 +75,6 @@
 ; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
 ; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: csrr a0, vlenb
 ; SPILL-O2-NEXT: slli a0, a0, 1
@@ -119,7 +117,6 @@
 ; SPILL-O2-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
 ; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: csrr a0, vlenb
 ; SPILL-O2-NEXT: slli a0, a0, 2
@@ -162,7 +159,6 @@
 ; SPILL-O2-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
 ; SPILL-O2-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: csrr a0, vlenb
 ; SPILL-O2-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
@@ -42,7 +42,6 @@
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
 ; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: csrr a1, vlenb
 ; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: add a0, a0, a1
 ; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
@@ -98,7 +97,6 @@
 ; SPILL-O2-NEXT: #APP
 ; SPILL-O2-NEXT: #NO_APP
 ; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: csrr a1, vlenb
 ; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT: add a0, a0, a1
 ; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/stack-realignment.ll b/llvm/test/CodeGen/RISCV/stack-realignment.ll
--- a/llvm/test/CodeGen/RISCV/stack-realignment.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment.ll
@@ -547,7 +547,6 @@
 ; RV32I-NEXT: call callee@plt
 ; RV32I-NEXT: lui a0, 2
 ; RV32I-NEXT: sub sp, s0, a0
-; RV32I-NEXT: lui a0, 2
 ; RV32I-NEXT: addi a0, a0, -2032
 ; RV32I-NEXT: add sp, sp, a0
 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
@@ -575,7 +574,6 @@
 ; RV64I-NEXT: call callee@plt
 ; RV64I-NEXT: lui a0, 2
 ; RV64I-NEXT: sub sp, s0, a0
-; RV64I-NEXT: lui a0, 2
 ; RV64I-NEXT: addiw a0, a0, -2032
 ; RV64I-NEXT: add sp, sp, a0
 ; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload