Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9935,6 +9935,15 @@
   if (LD->getOpcode() != ISD::LOAD)
     return SDValue();
 
+  // The vector lane must be a constant in the LD1LANE opcode.
+  SDValue Lane;
+  if (IsLaneOp) {
+    Lane = N->getOperand(2);
+    auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
+    if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
+      return SDValue();
+  }
+
   LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
   EVT MemVT = LoadSDN->getMemoryVT();
   // Check if memory operand is the same type as the vector element.
@@ -9991,7 +10000,7 @@
     Ops.push_back(LD->getOperand(0)); // Chain
     if (IsLaneOp) {
       Ops.push_back(Vector); // The vector to be inserted
-      Ops.push_back(N->getOperand(2)); // The lane to be inserted in the vector
+      Ops.push_back(Lane); // The lane to be inserted in the vector
     }
     Ops.push_back(Addr);
     Ops.push_back(Inc);
Index: llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
@@ -28,6 +28,28 @@
   ret void
 }
 
+; Avoid an assert/bad codegen in LD1LANEPOST lowering by not forming
+; LD1LANEPOST ISD nodes with a non-constant lane index.
+define <4 x i32> @f2(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2, i32 %idx) {
+  %L0 = load i32, i32* %p
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %L1 = load i32, i32* %p1
+  %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+  %vret = insertelement <4 x i32> %v, i32 %L0, i32 %idx
+  store i32 %L1, i32 *%p
+  ret <4 x i32> %vret
+}
+
+; Check that a cycle is avoided during isel between the LD1LANEPOST instruction and the load of %L1.
+define <4 x i32> @f3(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2) {
+  %L0 = load i32, i32* %p
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %L1 = load i32, i32* %p1
+  %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+  %vret = insertelement <4 x i32> %v, i32 %L0, i32 %L1
+  ret <4 x i32> %vret
+}
+
 ; Function Attrs: nounwind readnone
 declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #1