diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1633,7 +1633,7 @@
     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
     bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
-    if (HasPassthruOperand && !IsScalarMove && !Node->getOperand(0).isUndef())
+    if (HasPassthruOperand && !Node->getOperand(0).isUndef())
       break;
     SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
     auto *Ld = dyn_cast<LoadSDNode>(Src);
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
@@ -291,8 +291,9 @@
 define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64_bug(<vscale x 1 x i64> %0, i64* %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64_bug:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
+; CHECK-NEXT:    ld a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, mu
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
   %a = load i64, i64* %1, align 8