Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1452,6 +1452,25 @@
   return maxnum(Src0, Src1);
 }
 
+/// Convert a vector load intrinsic into a simple llvm load instruction.
+/// This is beneficial when the underlying object being addressed comes
+/// from a constant, since we get constant-folding for free.
+static Value *simplifyNeonVld1(const IntrinsicInst &II,
+                               unsigned MemAlign,
+                               InstCombiner::BuilderTy &Builder) {
+  auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
+
+  if (!IntrAlign)
+    return nullptr;
+
+  unsigned Alignment = IntrAlign->getZExtValue() < MemAlign ?
+                       MemAlign : IntrAlign->getZExtValue();
+
+  auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
+                                          PointerType::get(II.getType(), 0));
+  return Builder.CreateAlignedLoad(BCastInst, Alignment);
+}
+
 // Returns true iff the 2 intrinsics have the same operands, limiting the
 // comparison to the first NumOperands.
 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
@@ -2968,7 +2987,14 @@
     }
     break;
 
-  case Intrinsic::arm_neon_vld1:
+  case Intrinsic::arm_neon_vld1: {
+    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0),
+                                          DL, II, &AC, &DT);
+    if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder))
+      return replaceInstUsesWith(*II, V);
+    break;
+  }
+
   case Intrinsic::arm_neon_vld2:
   case Intrinsic::arm_neon_vld3:
   case Intrinsic::arm_neon_vld4:
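For reference, here is a minimal IR sketch of the rewrite the hunk above performs (the value names are illustrative, not taken from the patch). Assuming the alignment operand is the constant 8 and getKnownAlignment cannot infer anything larger for %ptr:

  ; before: the memory access is hidden behind the intrinsic
  %vld1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %ptr, i32 8)

  ; after: a plain load, with max(intrinsic alignment, inferred alignment) = 8
  %0 = bitcast i8* %ptr to <8 x i8>*
  %vld1 = load <8 x i8>, <8 x i8>* %0, align 8

The new test file below exercises exactly this pattern for each vld1 element type, plus the bail-out path for a non-constant alignment.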
Index: test/Transforms/InstCombine/ARM/vld1.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/ARM/vld1.ll
@@ -0,0 +1,91 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8-arm-none-eabi"
+
+; Turning a vld1 intrinsic into an llvm load is beneficial
+; when the underlying object being addressed comes from a
+; constant, since we get constant-folding for free.
+
+; Bail out of the optimization if the alignment is not a constant.
+define <2 x i64> @vld1_align(i8* %ptr, i32 %align) {
+; CHECK-NOT: bitcast i8* %ptr to <2 x i64>*
+; CHECK-NOT: load <2 x i64>, <2 x i64>*
+; CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8
+  %vld1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8(i8* %ptr, i32 %align)
+  ret <2 x i64> %vld1
+}
+
+define <8 x i8> @vld1_8x8(i8* %ptr) {
+; CHECK-NOT: call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8
+; CHECK: [[bcast:%.*]] = bitcast i8* %ptr to <8 x i8>*
+; CHECK: load <8 x i8>, <8 x i8>* [[bcast]], align 8
+  %vld1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %ptr, i32 8)
+  ret <8 x i8> %vld1
+}
+
+define <4 x i16> @vld1_4x16(i8* %ptr) {
+; CHECK-NOT: call <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8
+; CHECK: [[bcast:%.*]] = bitcast i8* %ptr to <4 x i16>*
+; CHECK: load <4 x i16>, <4 x i16>* [[bcast]], align 16
+  %vld1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8(i8* %ptr, i32 16)
+  ret <4 x i16> %vld1
+}
+
+define <2 x i32> @vld1_2x32(i8* %ptr) {
+; CHECK-NOT: call <2 x i32> @llvm.arm.neon.vld1.v2i32.p0i8
+; CHECK: [[bcast:%.*]] = bitcast i8* %ptr to <2 x i32>*
+; CHECK: load <2 x i32>, <2 x i32>* [[bcast]], align 32
+  %vld1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32.p0i8(i8* %ptr, i32 32)
+  ret <2 x i32> %vld1
+}
+
+define <1 x i64> @vld1_1x64(i8* %ptr) {
+; CHECK-NOT: call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8
+; CHECK: [[bcast:%.*]] = bitcast i8* %ptr to <1 x i64>*
+; CHECK: load <1 x i64>, <1 x i64>* [[bcast]], align 64
+  %vld1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %ptr, i32 64)
+  ret <1 x i64> %vld1
+}
+
+define <8 x i16> @vld1_8x16(i8* %ptr) {
+; CHECK-NOT: call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8
+; CHECK: [[bcast:%.*]] = bitcast i8* %ptr to <8 x i16>*
+; CHECK: load <8 x i16>, <8 x i16>* [[bcast]], align 16
+  %vld1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %ptr, i32 16)
+  ret <8 x i16> %vld1
+}
+
+define <16 x i8> @vld1_16x8(i8* %ptr) {
+; CHECK-NOT: call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8
+; CHECK: [[bcast:%.*]] = bitcast i8* %ptr to <16 x i8>*
+; CHECK: load <16 x i8>, <16 x i8>* [[bcast]], align 8
+  %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* %ptr, i32 8)
+  ret <16 x i8> %vld1
+}
+
+define <4 x i32> @vld1_4x32(i8* %ptr) {
+; CHECK-NOT: call <4 x i32> @llvm.arm.neon.vld1.v4i32.p0i8
+; CHECK: [[bcast:%.*]] = bitcast i8* %ptr to <4 x i32>*
+; CHECK: load <4 x i32>, <4 x i32>* [[bcast]], align 32
+  %vld1 = call <4 x i32> @llvm.arm.neon.vld1.v4i32.p0i8(i8* %ptr, i32 32)
+  ret <4 x i32> %vld1
+}
+
+define <2 x i64> @vld1_2x64(i8* %ptr) {
+; CHECK-NOT: call <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8
+; CHECK: [[bcast:%.*]] = bitcast i8* %ptr to <2 x i64>*
+; CHECK: load <2 x i64>, <2 x i64>* [[bcast]], align 64
+  %vld1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8(i8* %ptr, i32 64)
+  ret <2 x i64> %vld1
+}
+
+declare <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8*, i32)
+declare <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8(i8*, i32)
+declare <2 x i32> @llvm.arm.neon.vld1.v2i32.p0i8(i8*, i32)
+declare <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8*, i32)
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32)
+declare <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8*, i32)
+declare <4 x i32> @llvm.arm.neon.vld1.v4i32.p0i8(i8*, i32)
+declare <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8(i8*, i32)
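As a closing illustration of the "constant-folding for free" remark in the comment on simplifyNeonVld1: once the intrinsic has been rewritten as a plain load, a load from a constant global can fold away entirely. A hypothetical sketch (the global @cst and its contents are invented for this example and are not part of the patch or its tests):

  @cst = constant <2 x i32> <i32 1, i32 2>, align 8

  define <2 x i32> @vld1_from_constant() {
    %ptr = bitcast <2 x i32>* @cst to i8*
    %vld1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32.p0i8(i8* %ptr, i32 4)
    ret <2 x i32> %vld1
  }

With this patch, the call becomes load <2 x i32>, <2 x i32>* %0, align 8 (the global's known 8-byte alignment beats the intrinsic's declared 4), and constant folding should then reduce the function body to ret <2 x i32> <i32 1, i32 2>.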