diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1239,8 +1239,10 @@
       MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
       if (CurInfo.hasAVLImm())
         VLOp.ChangeToImmediate(CurInfo.getAVLImm());
-      else
+      else {
+        MRI->clearKillFlags(CurInfo.getAVLReg());
         VLOp.ChangeToRegister(CurInfo.getAVLReg(), /*IsDef*/ false);
+      }
       CurInfo = computeInfoForInstr(MI, TSFlags, MRI);
       continue;
     }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -521,6 +521,20 @@
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(
   <vscale x 1 x i64>,
   i64,
   i64)
 
+; Ensure AVL register is alive when forwarding an AVL immediate that does not fit in 5 bits
+define <vscale x 2 x i32> @avl_forward5(<vscale x 2 x i32>* %addr) {
+; CHECK-LABEL: avl_forward5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    ret
+  %gvl = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 0, i64 2)
+  %ret = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %addr, i64 %gvl)
+  ret <vscale x 2 x i32> %ret
+}
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,