diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2379,8 +2379,12 @@
   SDValue ValInVec;
   if (IsLegalInsert) {
-    if (isNullConstant(Idx))
-      return DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Vec, Val, VL);
+    if (isNullConstant(Idx)) {
+      Vec = DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Vec, Val, VL);
+      if (!VecVT.isFixedLengthVector())
+        return Vec;
+      return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
+    }
     ValInVec = DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT,
                            DAG.getUNDEF(ContainerVT), Val, VL);
   } else {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -178,7 +178,7 @@
 ; RV32-NEXT:    vle64.v v28, (a0)
 ; RV32-NEXT:    addi a1, zero, -1
 ; RV32-NEXT:    vmv.s.x v28, a1
-; RV32-NEXT:    vs4r.v v28, (a0)
+; RV32-NEXT:    vse64.v v28, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: insertelt_v8i64_0:
@@ -235,7 +235,7 @@
 ; RV32-NEXT:    vle64.v v28, (a0)
 ; RV32-NEXT:    addi a1, zero, 6
 ; RV32-NEXT:    vmv.s.x v28, a1
-; RV32-NEXT:    vs4r.v v28, (a0)
+; RV32-NEXT:    vse64.v v28, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: insertelt_c6_v8i64_0:
@@ -284,3 +284,35 @@
   store <8 x i64> %b, <8 x i64>* %x
   ret void
 }
+
+; Test that using the result of an insertelement at element 0 in a later
+; operation doesn't crash the compiler.
+define void @insertelt_c6_v8i64_0_add(<8 x i64>* %x, <8 x i64>* %y) {
+; RV32-LABEL: insertelt_c6_v8i64_0_add:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
+; RV32-NEXT:    vle64.v v28, (a0)
+; RV32-NEXT:    vle64.v v8, (a1)
+; RV32-NEXT:    addi a1, zero, 6
+; RV32-NEXT:    vmv.s.x v28, a1
+; RV32-NEXT:    vadd.vv v28, v28, v8
+; RV32-NEXT:    vse64.v v28, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: insertelt_c6_v8i64_0_add:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
+; RV64-NEXT:    vle64.v v28, (a0)
+; RV64-NEXT:    vle64.v v8, (a1)
+; RV64-NEXT:    addi a1, zero, 6
+; RV64-NEXT:    vmv.s.x v28, a1
+; RV64-NEXT:    vadd.vv v28, v28, v8
+; RV64-NEXT:    vse64.v v28, (a0)
+; RV64-NEXT:    ret
+  %a = load <8 x i64>, <8 x i64>* %x
+  %b = insertelement <8 x i64> %a, i64 6, i32 0
+  %c = load <8 x i64>, <8 x i64>* %y
+  %d = add <8 x i64> %b, %c
+  store <8 x i64> %d, <8 x i64>* %x
+  ret void
+}