Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5718,7 +5718,7 @@
                               Load->getMemoryVT(), Load->getMemOperand());
   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
-  return DAG.getMergeValues({Result, Load->getChain()}, DL);
+  return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
 }
 
 SDValue
Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -1138,10 +1138,10 @@
 ; LMULMAX1-NEXT:    vle32.v v8, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vle32.v v13, (a1)
-; LMULMAX1-NEXT:    addi a0, a1, 16
-; LMULMAX1-NEXT:    vle32.v v14, (a0)
 ; LMULMAX1-NEXT:    addi a0, a1, 32
 ; LMULMAX1-NEXT:    vle32.v v15, (a0)
+; LMULMAX1-NEXT:    addi a0, a1, 16
+; LMULMAX1-NEXT:    vle32.v v14, (a0)
 ; LMULMAX1-NEXT:    addi a0, a1, 48
 ; LMULMAX1-NEXT:    vle32.v v16, (a0)
 ; LMULMAX1-NEXT:    addi a0, a1, 64
Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
+++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
@@ -54,19 +54,19 @@
 define void @gather_const_v64f16(<64 x half>* %x) {
 ; LMULMAX8-LABEL: gather_const_v64f16:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, a0, 94
-; LMULMAX8-NEXT:    li a2, 64
-; LMULMAX8-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; LMULMAX8-NEXT:    vlse16.v v8, (a1), zero
+; LMULMAX8-NEXT:    li a1, 64
+; LMULMAX8-NEXT:    addi a2, a0, 94
+; LMULMAX8-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; LMULMAX8-NEXT:    vlse16.v v8, (a2), zero
 ; LMULMAX8-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v64f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    addi a1, a0, 80
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a3, a0, 48
+; LMULMAX1-NEXT:    addi a4, a0, 32
 ; LMULMAX1-NEXT:    addi a5, a0, 94
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vlse16.v v8, (a5), zero
@@ -76,11 +76,11 @@
 ; LMULMAX1-NEXT:    vse16.v v8, (a7)
 ; LMULMAX1-NEXT:    vse16.v v8, (a6)
 ; LMULMAX1-NEXT:    vse16.v v8, (a5)
+; LMULMAX1-NEXT:    vse16.v v8, (a1)
 ; LMULMAX1-NEXT:    vse16.v v8, (a4)
 ; LMULMAX1-NEXT:    vse16.v v8, (a3)
-; LMULMAX1-NEXT:    vse16.v v8, (a2)
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
-; LMULMAX1-NEXT:    vse16.v v8, (a1)
+; LMULMAX1-NEXT:    vse16.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
   %b = extractelement <64 x half> %a, i32 47
@@ -93,33 +93,33 @@
 define void @gather_const_v32f32(<32 x float>* %x) {
 ; LMULMAX8-LABEL: gather_const_v32f32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, a0, 68
-; LMULMAX8-NEXT:    li a2, 32
-; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; LMULMAX8-NEXT:    vlse32.v v8, (a1), zero
+; LMULMAX8-NEXT:    li a1, 32
+; LMULMAX8-NEXT:    addi a2, a0, 68
+; LMULMAX8-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; LMULMAX8-NEXT:    vlse32.v v8, (a2), zero
 ; LMULMAX8-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v32f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    addi a1, a0, 64
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a3, a0, 48
+; LMULMAX1-NEXT:    addi a4, a0, 32
 ; LMULMAX1-NEXT:    addi a5, a0, 68
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vlse32.v v8, (a5), zero
-; LMULMAX1-NEXT:    addi a5, a0, 64
+; LMULMAX1-NEXT:    addi a5, a0, 80
 ; LMULMAX1-NEXT:    addi a6, a0, 112
 ; LMULMAX1-NEXT:    addi a7, a0, 96
 ; LMULMAX1-NEXT:    vse32.v v8, (a7)
 ; LMULMAX1-NEXT:    vse32.v v8, (a6)
+; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    vse32.v v8, (a5)
 ; LMULMAX1-NEXT:    vse32.v v8, (a4)
 ; LMULMAX1-NEXT:    vse32.v v8, (a3)
-; LMULMAX1-NEXT:    vse32.v v8, (a2)
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
-; LMULMAX1-NEXT:    vse32.v v8, (a1)
+; LMULMAX1-NEXT:    vse32.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
   %b = extractelement <32 x float> %a, i32 17
@@ -140,23 +140,23 @@
 ;
 ; LMULMAX1-LABEL: gather_const_v16f64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    addi a1, a0, 80
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a3, a0, 48
+; LMULMAX1-NEXT:    addi a4, a0, 32
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vlse64.v v8, (a4), zero
+; LMULMAX1-NEXT:    vlse64.v v8, (a1), zero
 ; LMULMAX1-NEXT:    addi a5, a0, 64
 ; LMULMAX1-NEXT:    addi a6, a0, 112
 ; LMULMAX1-NEXT:    addi a7, a0, 96
 ; LMULMAX1-NEXT:    vse64.v v8, (a7)
 ; LMULMAX1-NEXT:    vse64.v v8, (a6)
 ; LMULMAX1-NEXT:    vse64.v v8, (a5)
+; LMULMAX1-NEXT:    vse64.v v8, (a1)
 ; LMULMAX1-NEXT:    vse64.v v8, (a4)
 ; LMULMAX1-NEXT:    vse64.v v8, (a3)
-; LMULMAX1-NEXT:    vse64.v v8, (a2)
 ; LMULMAX1-NEXT:    vse64.v v8, (a0)
-; LMULMAX1-NEXT:    vse64.v v8, (a1)
+; LMULMAX1-NEXT:    vse64.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
   %b = extractelement <16 x double> %a, i32 10
Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
+++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -71,10 +71,10 @@
 define void @gather_const_v64i8(<64 x i8>* %x) {
 ; LMULMAX4-LABEL: gather_const_v64i8:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, a0, 32
-; LMULMAX4-NEXT:    li a2, 64
-; LMULMAX4-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
-; LMULMAX4-NEXT:    vlse8.v v8, (a1), zero
+; LMULMAX4-NEXT:    li a1, 64
+; LMULMAX4-NEXT:    addi a2, a0, 32
+; LMULMAX4-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; LMULMAX4-NEXT:    vlse8.v v8, (a2), zero
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
@@ -101,10 +101,10 @@
 define void @gather_const_v16i16(<32 x i16>* %x) {
 ; LMULMAX4-LABEL: gather_const_v16i16:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, a0, 50
-; LMULMAX4-NEXT:    li a2, 32
-; LMULMAX4-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; LMULMAX4-NEXT:    vlse16.v v8, (a1), zero
+; LMULMAX4-NEXT:    li a1, 32
+; LMULMAX4-NEXT:    addi a2, a0, 50
+; LMULMAX4-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; LMULMAX4-NEXT:    vlse16.v v8, (a2), zero
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
@@ -113,13 +113,13 @@
 ; LMULMAX1-NEXT:    addi a1, a0, 50
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vlse16.v v8, (a1), zero
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
+; LMULMAX1-NEXT:    addi a1, a0, 48
+; LMULMAX1-NEXT:    addi a2, a0, 16
 ; LMULMAX1-NEXT:    addi a3, a0, 32
 ; LMULMAX1-NEXT:    vse16.v v8, (a3)
-; LMULMAX1-NEXT:    vse16.v v8, (a2)
-; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    vse16.v v8, (a1)
+; LMULMAX1-NEXT:    vse16.v v8, (a0)
+; LMULMAX1-NEXT:    vse16.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
   %b = extractelement <32 x i16> %a, i32 25
@@ -143,13 +143,13 @@
 ; LMULMAX1-NEXT:    addi a1, a0, 36
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vlse32.v v8, (a1), zero
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    addi a3, a0, 32
+; LMULMAX1-NEXT:    addi a1, a0, 32
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a3, a0, 48
+; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    vse32.v v8, (a3)
-; LMULMAX1-NEXT:    vse32.v v8, (a2)
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
-; LMULMAX1-NEXT:    vse32.v v8, (a1)
+; LMULMAX1-NEXT:    vse32.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
   %b = extractelement <16 x i32> %a, i32 9
Index: llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll
+++ llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll
@@ -11,12 +11,12 @@
 ; CHECK-NEXT:    addi a0, a0, %lo(c)
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    addi a1, a0, 16
+; CHECK-NEXT:    vle64.v v9, (a1)
 ; CHECK-NEXT:    addi a1, a0, 8
 ; CHECK-NEXT:    vse64.v v8, (a1)
-; CHECK-NEXT:    addi a1, a0, 16
-; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    addi a0, a0, 24
-; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v9, (a0)
 ; CHECK-NEXT:    ret
 entry:
   ; this thing is "__builtin_memmove(&c[1], &c[0], sizeof(c[0]) * 4);"
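
Note on the RISCVISelLowering.cpp hunk: the lowering creates a new load of a
scalable container type, so NewLoad carries its own output chain as value #1
(value #0 is the loaded data). Returning the incoming chain
(Load->getChain()) told SelectionDAG that users of the merged chain result do
not depend on NewLoad, leaving the scheduler free to move later stores ahead
of the load; returning NewLoad.getValue(1) restores the dependency. The new
wrong-chain-fixed-load.ll test pins down the observable miscompile: per its
comment, the IR came from an overlapping __builtin_memmove, and before the
fix the second vle64.v could be reordered after the vse64.v that clobbers its
source bytes. The following is a hypothetical standalone C++ reproducer of
that access pattern; the element type and array length are assumptions for
illustration, since the test body is truncated above.

    // Hypothetical reproducer of the overlapping copy the test exercises.
    // Element type and array size are assumed, not taken from the test.
    #include <cstdio>
    #include <cstring>

    long long c[8] = {0, 1, 2, 3, 4, 5, 6, 7};

    int main() {
      // Overlapping copy: c[1..4] must receive the *old* values of c[0..3].
      // If the source loads are scheduled after the stores that overwrite
      // them (the wrong-chain bug above), the copied values are corrupted.
      std::memmove(&c[1], &c[0], sizeof(c[0]) * 4);
      for (long long v : c)
        std::printf("%lld ", v); // expected: 0 0 1 2 3 5 6 7
      std::printf("\n");
      return 0;
    }

Compiled for RV64 with vector codegen, such an overlapping memmove is exactly
the shape that exposed the stale chain, which is why the patch adds a
dedicated regression test rather than relying only on the reshuffled
check lines in the existing gather/calling-convention tests.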