diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1936,7 +1936,9 @@
     return SDValue();
   SDValue Vec = SplatVal.getOperand(0);
   // Only perform this optimization on vectors of the same size for simplicity.
-  if (Vec.getValueType() != VT)
+  // Don't perform this optimization for i1 vectors.
+  // FIXME: Support i1 vectors, maybe by promoting to i8?
+  if (Vec.getValueType() != VT || VT.getVectorElementType() == MVT::i1)
     return SDValue();
   SDValue Idx = SplatVal.getOperand(1);
   // The index must be a legal type.
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
@@ -191,3 +191,24 @@
   %splat = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
   ret <vscale x 64 x i1> %splat
 }
+
+define <vscale x 4 x i1> @splat_idx_nxv4i32(<vscale x 4 x i1> %v, i64 %idx) {
+; CHECK-LABEL: splat_idx_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %x = extractelement <vscale x 4 x i1> %v, i64 %idx
+  %ins = insertelement <vscale x 4 x i1> poison, i1 %x, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %ins, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i1> %splat
+}
+