Index: lib/CodeGen/InterleavedAccessPass.cpp
===================================================================
--- lib/CodeGen/InterleavedAccessPass.cpp
+++ lib/CodeGen/InterleavedAccessPass.cpp
@@ -163,14 +163,19 @@
 /// <0, 2, 4, 6>    (mask of index 0 to extract even elements)
 /// <1, 3, 5, 7>    (mask of index 1 to extract odd elements)
 static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
-                               unsigned &Index, unsigned MaxFactor) {
+                               unsigned &Index, unsigned MaxFactor,
+                               unsigned NumLoadElements) {
   if (Mask.size() < 2)
     return false;
 
   // Check potential Factors.
-  for (Factor = 2; Factor <= MaxFactor; Factor++)
+  for (Factor = 2; Factor <= MaxFactor; Factor++) {
+    // Make sure we don't produce a load wider than the input load.
+    if (Mask.size() * Factor > NumLoadElements)
+      return false;
     if (isDeInterleaveMaskOfFactor(Mask, Factor, Index))
       return true;
+  }
 
   return false;
 }
@@ -302,9 +307,10 @@
 
   unsigned Factor, Index;
 
+  unsigned NumLoadElements = LI->getType()->getVectorNumElements();
   // Check if the first shufflevector is DE-interleave shuffle.
   if (!isDeInterleaveMask(Shuffles[0]->getShuffleMask(), Factor, Index,
-                          MaxFactor))
+                          MaxFactor, NumLoadElements))
     return false;
 
   // Holds the corresponding index for each DE-interleave shuffle.
Index: test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll
===================================================================
--- test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll
+++ test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll
@@ -352,9 +352,9 @@
   ret void
 }
 
-define void @load_address_space(<4 x i32> addrspace(1)* %ptr) {
+define void @load_address_space(<8 x i32> addrspace(1)* %ptr) {
 ; NEON-LABEL: @load_address_space(
-; NEON-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> addrspace(1)* %ptr to i8 addrspace(1)*
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <8 x i32> addrspace(1)* %ptr to i8 addrspace(1)*
 ; NEON-NEXT: [[VLDN:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32.p1i8(i8 addrspace(1)* [[TMP1]], i32 0)
 ; NEON-NEXT: [[TMP2:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[VLDN]], 2
 ; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[VLDN]], 1
@@ -364,10 +364,10 @@
 ; NO_NEON-NOT: @llvm.arm.neon
 ; NO_NEON: ret void
 ;
-  %interleaved.vec = load <4 x i32>, <4 x i32> addrspace(1)* %ptr
-  %v0 = shufflevector <4 x i32> %interleaved.vec, <4 x i32> undef, <2 x i32> <i32 0, i32 3>
-  %v1 = shufflevector <4 x i32> %interleaved.vec, <4 x i32> undef, <2 x i32> <i32 1, i32 4>
-  %v2 = shufflevector <4 x i32> %interleaved.vec, <4 x i32> undef, <2 x i32> <i32 2, i32 5>
+  %interleaved.vec = load <8 x i32>, <8 x i32> addrspace(1)* %ptr
+  %v0 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> undef, <2 x i32> <i32 0, i32 3>
+  %v1 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> undef, <2 x i32> <i32 1, i32 4>
+  %v2 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> undef, <2 x i32> <i32 2, i32 5>
   ret void
 }
 
@@ -883,3 +883,16 @@
   %v1 = shufflevector <16 x i32*> %interleaved.vec, <16 x i32*> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   ret void
 }
+
+; This would be a candidate for interleaving, except that load doesn't
+; actually load enough elements to satisfy the shuffle masks. (It would be
+; possible to produce a vld2.v2i32, but that currently isn't implemented.)
+define void @load_out_of_range(<4 x i32>* %ptr) {
+; ALL-LABEL: @load_out_of_range(
+; ALL-NOT: @llvm.arm.neon
+; ALL: ret void
+  %interleaved.vec = load <4 x i32>, <4 x i32>* %ptr, align 4
+  %v0 = shufflevector <4 x i32> %interleaved.vec, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %v1 = shufflevector <4 x i32> %interleaved.vec, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  ret void
+}