diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9631,14 +9631,12 @@
     MVT CastVT;
     if (getScaledOffsetDup(V, Lane, CastVT)) {
      V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0));
-    } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
+    } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+               V.getOperand(0).getValueType().is128BitVector()) {
       // The lane is incremented by the index of the extract.
       // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
-      auto VecVT = V.getOperand(0).getValueType();
-      if (VecVT.isFixedLengthVector() && VecVT.getFixedSizeInBits() <= 128) {
-        Lane += V.getConstantOperandVal(1);
-        V = V.getOperand(0);
-      }
+      Lane += V.getConstantOperandVal(1);
+      V = V.getOperand(0);
     } else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
       // The lane is decremented if we are splatting from the 2nd operand.
       // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
@@ -26,3 +26,27 @@
   store <16 x i32> %2, <16 x i32>* %arg1, align 256
   ret <4 x i32> %shvec
 }
+
+define <2 x i32> @test2(<16 x i32>* %arg1, <16 x i32>* %arg2) {
+; CHECK-LABEL: test2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov x8, #8
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x0]
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    add z2.s, p0/m, z2.s, z2.s
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #24
+; CHECK-NEXT:    add z1.s, p0/m, z1.s, z1.s
+; CHECK-NEXT:    dup v0.2s, v0.s[0]
+; CHECK-NEXT:    st1w { z1.s }, p0, [x0, x8, lsl #2]
+; CHECK-NEXT:    st1w { z2.s }, p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load <16 x i32>, <16 x i32>* %arg1, align 256
+  %1 = load <16 x i32>, <16 x i32>* %arg2, align 256
+  %shvec = shufflevector <16 x i32> %0, <16 x i32> %1, <2 x i32> <i32 14, i32 14>
+  %2 = add <16 x i32> %0, %0
+  store <16 x i32> %2, <16 x i32>* %arg1, align 256
+  ret <2 x i32> %shvec
+}
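
For reference, a minimal standalone sketch of the rule the patched guard enforces (Vec and foldDupLane are hypothetical names for illustration, not LLVM API): the fold dup (extract_subvector V, Idx), Lane --> dup V, Lane + Idx is now applied only when the extract's source V is exactly a 128-bit fixed-length vector, since DUPLANE indexes lanes of a single Q register; wider fixed-length sources, such as the 512-bit <16 x i32> in test2 that is lowered via SVE, keep the extract_subvector and dup from its result instead.

#include <optional>

// Hypothetical sketch of the guarded fold, outside SelectionDAG.
struct Vec {
  unsigned NumElts; // element count of the extract's source vector
  unsigned EltBits; // element size in bits
  bool Scalable;    // true for SVE scalable vectors
};

// dup (extract_subvector V, Idx), Lane --> dup V, Lane + Idx
// Legal only when V occupies exactly one 128-bit NEON Q register.
std::optional<unsigned> foldDupLane(const Vec &V, unsigned Idx, unsigned Lane) {
  if (V.Scalable || V.NumElts * V.EltBits != 128)
    return std::nullopt; // no fold: keep the extract_subvector in place
  return Lane + Idx;     // fold: dup directly from a lane of the wider source
}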