Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9515,8 +9515,11 @@
     } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
       // The lane is incremented by the index of the extract.
       // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
-      Lane += V.getConstantOperandVal(1);
-      V = V.getOperand(0);
+      auto VecVT = V.getOperand(0).getValueType();
+      if (VecVT.isFixedLengthVector() && VecVT.getFixedSizeInBits() <= 128) {
+        Lane += V.getConstantOperandVal(1);
+        V = V.getOperand(0);
+      }
     } else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
       // The lane is decremented if we are splatting from the 2nd operand.
       // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1
Index: llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mattr=+sve -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-gnu"
+
+define <4 x i32> @test(<16 x i32>* %arg1, <16 x i32>* %arg2) {
+; CHECK-LABEL: test:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub x9, sp, #48
+; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, #8
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    st1w { z0.s }, p0, [x9]
+; CHECK-NEXT:    ldr q2, [sp, #16]
+; CHECK-NEXT:    add z1.s, p0/m, z1.s, z1.s
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    add z3.s, p0/m, z3.s, z0.s
+; CHECK-NEXT:    st1w { z3.s }, p0, [x0, x8, lsl #2]
+; CHECK-NEXT:    dup v0.4s, v2.s[2]
+; CHECK-NEXT:    st1w { z1.s }, p0, [x0]
+; CHECK-NEXT:    mov sp, x29
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = load <16 x i32>, <16 x i32>* %arg1, align 256
+  %1 = load <16 x i32>, <16 x i32>* %arg2, align 256
+  %shvec = shufflevector <16 x i32> %0, <16 x i32> %1, <4 x i32> <i32 14, i32 14, i32 14, i32 14>
+  %2 = add <16 x i32> %0, %0
+  store <16 x i32> %2, <16 x i32>* %arg1, align 256
+  ret <4 x i32> %shvec
+}