Index: llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1234,8 +1234,10 @@
         MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
         if (CurInfo.hasAVLImm())
           VLOp.ChangeToImmediate(CurInfo.getAVLImm());
-        else
+        else {
+          MRI->clearKillFlags(CurInfo.getAVLReg());
           VLOp.ChangeToRegister(CurInfo.getAVLReg(), /*IsDef*/ false);
+        }
         CurInfo = computeInfoForInstr(MI, TSFlags, MRI);
         continue;
       }
Index: llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -493,6 +493,20 @@
   ret i64 %vl
 }
 
+; Ensure the AVL register is alive when forwarding an AVL immediate that does not fit in 5 bits.
+define <vscale x 2 x i32> @avl_forward5(<vscale x 2 x i32>* %addr) {
+; CHECK-LABEL: avl_forward5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    ret
+  %gvl = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 0, i64 2)
+  %ret = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %addr, i64 %gvl)
+  ret <vscale x 2 x i32> %ret
+}
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,