diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -568,8 +568,8 @@
 }
 
 ; FIXME: Use vse64.v on RV32 to avoid two scalar extracts and two scalar stores.
-define void @store_extractelt_v4i64(<2 x i64>* %x, i64* %p) nounwind {
-; RV32-LABEL: store_extractelt_v4i64:
+define void @store_extractelt_v2i64(<2 x i64>* %x, i64* %p) nounwind {
+; RV32-LABEL: store_extractelt_v2i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
@@ -583,7 +583,7 @@
 ; RV32-NEXT:    sw a0, 4(a1)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: store_extractelt_v4i64:
+; RV64-LABEL: store_extractelt_v2i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
@@ -597,8 +597,8 @@
   ret void
 }
 
-define void @store_extractelt_v4f64(<2 x double>* %x, double* %p) nounwind {
-; CHECK-LABEL: store_extractelt_v4f64:
+define void @store_extractelt_v2f64(<2 x double>* %x, double* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll
@@ -34,26 +34,6 @@
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vnsra.wx v8, v8, a0
 ; CHECK-NEXT:    ret
-; RV32-LABEL: vnsra_v2i64_v2i32_scalar:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v25, (a0), zero
-; RV32-NEXT:    vsra.vv v25, v8, v25
-; RV32-NEXT:    vsetivli zero, 2, e32,mf2,ta,mu
-; RV32-NEXT:    vnsrl.wi v8, v25, 0
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
-; RV64-LABEL: vnsra_v2i64_v2i32_scalar:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32,mf2,ta,mu
-; RV64-NEXT:    vnsra.wx v25, v8, a0
-; RV64-NEXT:    vmv1r.v v8, v25
-; RV64-NEXT:    ret
   %insert = insertelement <2 x i64> poison, i64 %y, i32 0
   %splat = shufflevector <2 x i64> %insert, <2 x i64> poison, <2 x i32> zeroinitializer
   %a = ashr <2 x i64> %x, %splat
@@ -126,26 +106,6 @@
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vnsrl.wx v8, v8, a0
 ; CHECK-NEXT:    ret
-; RV32-LABEL: vnsrl_v2i64_v2i32_scalar:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v25, (a0), zero
-; RV32-NEXT:    vsrl.vv v25, v8, v25
-; RV32-NEXT:    vsetivli zero, 2, e32,mf2,ta,mu
-; RV32-NEXT:    vnsrl.wi v8, v25, 0
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
-; RV64-LABEL: vnsrl_v2i64_v2i32_scalar:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32,mf2,ta,mu
-; RV64-NEXT:    vnsrl.wx v25, v8, a0
-; RV64-NEXT:    vmv1r.v v8, v25
-; RV64-NEXT:    ret
   %insert = insertelement <2 x i64> poison, i64 %y, i32 0
   %splat = shufflevector <2 x i64> %insert, <2 x i64> poison, <2 x i32> zeroinitializer
   %a = lshr <2 x i64> %x, %splat