diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1164,6 +1164,20 @@
     Known = KnownScl.trunc(BitWidth);
     break;
   }
+  case ISD::SPLAT_VECTOR_PARTS: {
+    unsigned NumSclBits = Op.getOperand(0).getScalarValueSizeInBits();
+    assert(NumSclBits * Op.getNumOperands() == BitWidth &&
+           "Expected SPLAT_VECTOR_PARTS scalars to cover element width");
+    for (auto [I, Scl] : enumerate(Op->ops())) {
+      APInt DemandedSclBits =
+          DemandedBits.extractBits(NumSclBits, NumSclBits * I);
+      KnownBits KnownScl;
+      if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
+        return true;
+      Known.insertBits(KnownScl, NumSclBits * I);
+    }
+    break;
+  }
   case ISD::LOAD: {
     auto *LD = cast<LoadSDNode>(Op);
     if (getTargetConstantFromLoad(LD)) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
@@ -93,17 +93,13 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    sw a1, 4(sp)
-; CHECK-NEXT:    sw a0, 0(sp)
-; CHECK-NEXT:    mv a0, sp
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
 ; CHECK-NEXT:    sw zero, 12(sp)
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vand.vv v8, v8, v10
+; CHECK-NEXT:    li a1, -1
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    addi a1, sp, 8
+; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a1), zero
+; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %1 = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
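
Note on the demanded-bits arithmetic: for SPLAT_VECTOR_PARTS the element is assembled from getNumOperands() scalars of NumSclBits each, so operand I covers element bits [NumSclBits*I, NumSclBits*(I+1)). The sketch below is a minimal standalone C++ illustration, not LLVM code; extractBits32 is a hypothetical stand-in for APInt::extractBits, and it walks the test's pattern, an i64 splat ANDed with 0x00000000FFFFFFFF on RV32.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for APInt::extractBits(NumBits, BitPosition),
// fixed to 32-bit slices of a 64-bit value for this illustration.
static uint32_t extractBits32(uint64_t Value, unsigned BitPosition) {
  assert(BitPosition <= 32 && "slice must fit in the 64-bit value");
  return static_cast<uint32_t>(Value >> BitPosition);
}

int main() {
  // Demanded mask imposed by the (and (splat %x), 0xFFFFFFFF) in the test.
  const uint64_t DemandedBits = 0x00000000FFFFFFFFULL;
  const unsigned NumSclBits = 32; // i64 element built from two i32 parts on RV32
  for (unsigned I = 0; I < 2; ++I) {
    uint32_t DemandedSclBits = extractBits32(DemandedBits, NumSclBits * I);
    std::printf("part %u demanded mask: 0x%08x\n", I,
                static_cast<unsigned>(DemandedSclBits));
  }
  // Prints:
  //   part 0 demanded mask: 0xffffffff   (a0, low word: fully live)
  //   part 1 demanded mask: 0x00000000   (a1, high word: dead)
  return 0;
}

With no bits demanded from the high part, the recursive SimplifyDemandedBits call can simplify the a1 operand away, which is what lets the test's two stack-based splats collapse into a single vlse64 plus a vand.vx on a0.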