Index: llvm/trunk/lib/Target/ARM/ARMISelLowering.h
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.h
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.h
@@ -283,6 +283,8 @@
     using TargetLowering::isZExtFree;
     bool isZExtFree(SDValue Val, EVT VT2) const override;
 
+    bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
+
     bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
 
Index: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
@@ -10077,6 +10077,28 @@
   return false;
 }
 
+bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
+  EVT VT = ExtVal.getValueType();
+
+  if (!isTypeLegal(VT))
+    return false;
+
+  // Don't create a loadext if we can fold the extension into a wide/long
+  // instruction.
+  // If there's more than one user instruction, the loadext is desirable no
+  // matter what.  There can be two uses by the same instruction.
+  if (ExtVal->use_empty() ||
+      !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
+    return true;
+
+  SDNode *U = *ExtVal->use_begin();
+  if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
+       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
+    return false;
+
+  return true;
+}
+
 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
     return false;
Index: llvm/trunk/test/CodeGen/ARM/big-endian-neon-extend.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/big-endian-neon-extend.ll
+++ llvm/trunk/test/CodeGen/ARM/big-endian-neon-extend.ll
@@ -3,15 +3,10 @@
 define void @vector_ext_2i8_to_2i64( <2 x i8>* %loadaddr, <2 x i64>* %storeaddr ) {
 ; CHECK-LABEL: vector_ext_2i8_to_2i64:
 ; CHECK:      vld1.16 {[[REG:d[0-9]+]][0]}, [r0:16]
-; CHECK-NEXT: vmov.i64 [[MASK:q[0-9]+]], #0xff
-; CHECK-NEXT: vrev64.32 [[MASK]], [[MASK]]
 ; CHECK-NEXT: vrev16.8 [[REG]], [[REG]]
 ; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
 ; CHECK-NEXT: vmovl.u16 [[QREG]], [[REG]]
 ; CHECK-NEXT: vmovl.u32 [[QREG]], [[REG]]
-; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
-; CHECK-NEXT: vand [[QREG]], [[QREG]], [[MASK]]
-; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
 ; CHECK-NEXT: vst1.64 {[[REG]], {{d[0-9]+}}}, [r1]
 ; CHECK-NEXT: bx lr
   %1 = load <2 x i8>, <2 x i8>* %loadaddr
@@ -23,14 +18,9 @@
 define void @vector_ext_2i16_to_2i64( <2 x i16>* %loadaddr, <2 x i64>* %storeaddr ) {
 ; CHECK-LABEL: vector_ext_2i16_to_2i64:
 ; CHECK:      vld1.32 {[[REG:d[0-9]+]][0]}, [r0:32]
-; CHECK-NEXT: vmov.i64 [[MASK:q[0-9]+]], #0xffff
-; CHECK-NEXT: vrev64.32 [[MASK]], [[MASK]]
 ; CHECK-NEXT: vrev32.16 [[REG]], [[REG]]
 ; CHECK-NEXT: vmovl.u16 [[QREG:q[0-9]+]], [[REG]]
 ; CHECK-NEXT: vmovl.u32 [[QREG]], [[REG]]
-; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
-; CHECK-NEXT: vand [[QREG]], [[QREG]], [[MASK]]
-; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
 ; CHECK-NEXT: vst1.64 {[[REG]], {{d[0-9]+}}}, [r1]
 ; CHECK-NEXT: bx lr
   %1 = load <2 x i16>, <2 x i16>* %loadaddr
Index: llvm/trunk/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
+++ llvm/trunk/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
@@ -2,7 +2,7 @@
 
 ; CHECK-LABEL: f:
 define float @f(<4 x i16>* nocapture %in) {
-  ; CHECK: vldr
+  ; CHECK: vld1
   ; CHECK: vmovl.u16
   ; CHECK-NOT: vand
   %1 = load <4 x i16>, <4 x i16>* %in
Index: llvm/trunk/test/CodeGen/ARM/vector-extend-narrow.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/vector-extend-narrow.ll
+++ llvm/trunk/test/CodeGen/ARM/vector-extend-narrow.ll
@@ -2,7 +2,7 @@
 
 ; CHECK-LABEL: f:
 define float @f(<4 x i16>* nocapture %in) {
-  ; CHECK: vldr
+  ; CHECK: vld1
   ; CHECK: vmovl.u16
   %1 = load <4 x i16>, <4 x i16>* %in
   ; CHECK: vcvt.f32.u32
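
Illustrative sketch (not part of the patch): the hypothetical IR function below shows the kind of pattern the new hook is meant to protect. When the zero-extension's only user is an add, isVectorLoadExtDesirable returns false, so DAGCombine keeps the load and the extension as separate nodes and instruction selection can fold the extension into a widening NEON add (e.g. vaddw.u16) rather than emitting an extending load followed by a plain vadd. The function name and types here are assumptions chosen for illustration.

define <4 x i32> @widen_then_add(<4 x i16>* %p, <4 x i32> %acc) {
  ; The zext has a single user (the add), so the hook reports the extending
  ; load as undesirable and the extension survives for instruction selection.
  %v = load <4 x i16>, <4 x i16>* %p
  %ext = zext <4 x i16> %v to <4 x i32>
  %sum = add <4 x i32> %ext, %acc
  ret <4 x i32> %sum
}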