diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1014,6 +1014,10 @@
   if (AM.BaseGV)
     return false;
 
+  // RVV instructions only support register addressing.
+  if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
+    return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;
+
   // Require a 12-bit signed offset.
   if (!isInt<12>(AM.BaseOffs))
     return false;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
@@ -596,7 +596,6 @@
 ;
 ; CHECK-ASM-LABEL: struct_gather:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    addi a0, a0, 32
 ; CHECK-ASM-NEXT:    addi a1, a1, 132
 ; CHECK-ASM-NEXT:    li a2, 1024
 ; CHECK-ASM-NEXT:    li a3, 16
@@ -606,13 +605,13 @@
 ; CHECK-ASM-NEXT:    vsetivli zero, 8, e32, m1, ta, mu
 ; CHECK-ASM-NEXT:    vlse32.v v8, (a4), a3
 ; CHECK-ASM-NEXT:    vlse32.v v9, (a1), a3
-; CHECK-ASM-NEXT:    addi a4, a0, -32
-; CHECK-ASM-NEXT:    vle32.v v10, (a4)
-; CHECK-ASM-NEXT:    vle32.v v11, (a0)
+; CHECK-ASM-NEXT:    vle32.v v10, (a0)
+; CHECK-ASM-NEXT:    addi a4, a0, 32
+; CHECK-ASM-NEXT:    vle32.v v11, (a4)
 ; CHECK-ASM-NEXT:    vadd.vv v8, v10, v8
 ; CHECK-ASM-NEXT:    vadd.vv v9, v11, v9
-; CHECK-ASM-NEXT:    vse32.v v8, (a4)
-; CHECK-ASM-NEXT:    vse32.v v9, (a0)
+; CHECK-ASM-NEXT:    vse32.v v8, (a0)
+; CHECK-ASM-NEXT:    vse32.v v9, (a4)
 ; CHECK-ASM-NEXT:    addi a2, a2, -16
 ; CHECK-ASM-NEXT:    addi a0, a0, 64
 ; CHECK-ASM-NEXT:    addi a1, a1, 256
@@ -838,17 +837,16 @@
 ;
 ; CHECK-ASM-LABEL: gather_of_pointers:
 ; CHECK-ASM:       # %bb.0:
-; CHECK-ASM-NEXT:    addi a0, a0, 16
 ; CHECK-ASM-NEXT:    li a2, 1024
 ; CHECK-ASM-NEXT:    li a3, 40
 ; CHECK-ASM-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
-; CHECK-ASM-NEXT:    addi a4, a1, 80
 ; CHECK-ASM-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-ASM-NEXT:    vlse64.v v8, (a1), a3
+; CHECK-ASM-NEXT:    addi a4, a1, 80
 ; CHECK-ASM-NEXT:    vlse64.v v9, (a4), a3
-; CHECK-ASM-NEXT:    addi a4, a0, -16
-; CHECK-ASM-NEXT:    vse64.v v8, (a4)
-; CHECK-ASM-NEXT:    vse64.v v9, (a0)
+; CHECK-ASM-NEXT:    vse64.v v8, (a0)
+; CHECK-ASM-NEXT:    addi a4, a0, 16
+; CHECK-ASM-NEXT:    vse64.v v9, (a4)
 ; CHECK-ASM-NEXT:    addi a2, a2, -4
 ; CHECK-ASM-NEXT:    addi a0, a0, 32
 ; CHECK-ASM-NEXT:    addi a1, a1, 160
@@ -912,14 +910,13 @@
 ;
 ; CHECK-ASM-LABEL: scatter_of_pointers:
 ; CHECK-ASM:       # %bb.0:
-; CHECK-ASM-NEXT:    addi a1, a1, 16
 ; CHECK-ASM-NEXT:    li a2, 1024
 ; CHECK-ASM-NEXT:    li a3, 40
 ; CHECK-ASM-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
-; CHECK-ASM-NEXT:    addi a4, a1, -16
 ; CHECK-ASM-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-ASM-NEXT:    vle64.v v8, (a4)
-; CHECK-ASM-NEXT:    vle64.v v9, (a1)
+; CHECK-ASM-NEXT:    vle64.v v8, (a1)
+; CHECK-ASM-NEXT:    addi a4, a1, 16
+; CHECK-ASM-NEXT:    vle64.v v9, (a4)
 ; CHECK-ASM-NEXT:    addi a4, a0, 80
 ; CHECK-ASM-NEXT:    vsse64.v v8, (a0), a3
 ; CHECK-ASM-NEXT:    vsse64.v v9, (a4), a3
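
For reference, a minimal standalone sketch of the predicate added to isLegalAddressingMode above. The AddrMode struct and function name here are simplified stand-ins for illustration only, not LLVM's actual TargetLowering::AddrMode or API: for RVV memory accesses, only a bare base register is accepted, so any scaled index or nonzero immediate offset is rejected. This is consistent with the updated test output, where addresses are materialized with addi and the vector loads/stores take a plain register operand.

#include <cstdint>

// Simplified stand-in for TargetLowering::AddrMode (illustration only).
struct AddrMode {
  bool HasBaseReg;   // reg
  int64_t BaseOffs;  // reg + imm
  int64_t Scale;     // reg + reg * scale
};

// Mirrors the new RVV branch: only plain register addressing is legal.
static bool isLegalRVVAddressingMode(const AddrMode &AM) {
  return AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0;
}

int main() {
  AddrMode RegOnly = {/*HasBaseReg=*/true, /*BaseOffs=*/0, /*Scale=*/0};
  AddrMode RegPlusImm = {/*HasBaseReg=*/true, /*BaseOffs=*/32, /*Scale=*/0};
  // reg-only is accepted; reg+32 is not, so the offset stays in a
  // separate addi rather than being folded into the vector access.
  return (isLegalRVVAddressingMode(RegOnly) &&
          !isLegalRVVAddressingMode(RegPlusImm))
             ? 0
             : 1;
}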