diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -969,6 +969,12 @@ return; } + if (RISCV::isFaultFirstLoad(MI)) { + // Update AVL to vl-output of the fault first load. + Info.setAVLReg(MI.getOperand(1).getReg()); + return; + } + // If this is something that updates VL/VTYPE that we don't know about, set // the state to unknown. if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) || @@ -1259,11 +1265,7 @@ continue; } - // If this is something that updates VL/VTYPE that we don't know about, - // set the state to unknown. - if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) || - MI.modifiesRegister(RISCV::VTYPE)) - CurInfo = VSETVLIInfo::getUnknown(); + transferAfter(CurInfo, MI); } } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll @@ -492,14 +492,13 @@ } ; Fault first loads can modify VL. -; TODO: The first and third VSETVLIs are redundant here. +; TODO: The VSETVLI of vadd could be removed here. define @vleNff(i64* %str, i64 %n, i64 %x) { ; CHECK-LABEL: vleNff: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vadd.vx v8, v8, a2 ; CHECK-NEXT: ret entry: @@ -512,6 +511,25 @@ ret %5 } +; Similar test case, but use same policy for vleff and vadd. +; Note: The test may be redundant if we could fix the TODO of @vleNff. 
+define @vleNff2(i64* %str, i64 %n, i64 %x) { +; CHECK-LABEL: vleNff2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: vadd.vx v8, v8, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 0, i64 2) + %1 = bitcast i64* %str to * + %2 = tail call { , i64 } @llvm.riscv.vleff.nxv1i64.i64( undef, * %1, i64 %0) + %3 = extractvalue { , i64 } %2, 0 + %4 = extractvalue { , i64 } %2, 1 + %5 = tail call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, %3, i64 %x, i64 %4) + ret %5 +} + declare { , i64 } @llvm.riscv.vleff.nxv1i64.i64( , * nocapture, i64)