Index: llvm/trunk/lib/Target/X86/X86.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86.td
+++ llvm/trunk/lib/Target/X86/X86.td
@@ -99,6 +99,8 @@
                                        "Bit testing of memory is slow">;
 def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
                                        "SHLD instruction is slow">;
+def FeatureSlowPMULLD : SubtargetFeature<"slow-pmulld", "IsPMULLDSlow", "true",
+                                         "PMULLD instruction is slow">;
 // FIXME: This should not apply to CPUs that do not have SSE.
 def FeatureSlowUAMem16 : SubtargetFeature<"slow-unaligned-mem-16",
                                           "IsUAMem16Slow", "true",
@@ -403,6 +405,7 @@
   FeatureSlowLEA,
   FeatureSlowIncDec,
   FeatureSlowBTMem,
+  FeatureSlowPMULLD,
   FeatureLAHFSAHF
 ]>;
 def : SilvermontProc<"silvermont">;
Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -29302,10 +29302,17 @@
 /// generate pmullw+pmulhuw for it (MULU16 mode).
 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
-  // pmulld is supported since SSE41. It is better to use pmulld
-  // instead of pmullw+pmulhw.
+  // Check for legality
   // pmullw/pmulhw are not supported by SSE.
-  if (Subtarget.hasSSE41() || !Subtarget.hasSSE2())
+  if (!Subtarget.hasSSE2())
+    return SDValue();
+
+  // Check for profitability
+  // pmulld is supported since SSE41. It is better to use pmulld
+  // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
+  // the expansion.
+  bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+  if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
     return SDValue();
 
   ShrinkMode Mode;
Index: llvm/trunk/lib/Target/X86/X86Subtarget.h
===================================================================
--- llvm/trunk/lib/Target/X86/X86Subtarget.h
+++ llvm/trunk/lib/Target/X86/X86Subtarget.h
@@ -178,6 +178,10 @@
   /// True if SHLD instructions are slow.
   bool IsSHLDSlow;
 
+  /// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
+  /// PMULUDQ.
+  bool IsPMULLDSlow;
+
   /// True if unaligned memory accesses of 16-bytes are slow.
   bool IsUAMem16Slow;
 
@@ -452,6 +456,7 @@
   bool hasMWAITX() const { return HasMWAITX; }
   bool isBTMemSlow() const { return IsBTMemSlow; }
   bool isSHLDSlow() const { return IsSHLDSlow; }
+  bool isPMULLDSlow() const { return IsPMULLDSlow; }
   bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
   bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
   bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
Index: llvm/trunk/lib/Target/X86/X86Subtarget.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86Subtarget.cpp
+++ llvm/trunk/lib/Target/X86/X86Subtarget.cpp
@@ -228,6 +228,9 @@
   else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
            isTargetKFreeBSD() || In64BitMode)
     stackAlignment = 16;
+
+  assert((!isPMULLDSlow() || hasSSE41()) &&
+         "Feature Slow PMULLD can only be set on a subtarget with SSE4.1");
 }
 
 void X86Subtarget::initializeEnvironment() {
@@ -275,6 +278,7 @@
   HasMWAITX = false;
   HasMPX = false;
   IsBTMemSlow = false;
+  IsPMULLDSlow = false;
   IsSHLDSlow = false;
   IsUAMem16Slow = false;
   IsUAMem32Slow = false;
Index: llvm/trunk/test/CodeGen/X86/slow-pmulld.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/slow-pmulld.ll
+++ llvm/trunk/test/CodeGen/X86/slow-pmulld.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-64
+
+define <4 x i32> @foo(<4 x i8> %A) {
+; CHECK32-LABEL: foo:
+; CHECK32:       # BB#0:
+; CHECK32-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
+; CHECK32-NEXT:    movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
+; CHECK32-NEXT:    movdqa %xmm0, %xmm2
+; CHECK32-NEXT:    pmullw %xmm1, %xmm0
+; CHECK32-NEXT:    pmulhw %xmm1, %xmm2
+; CHECK32-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK32-NEXT:    retl
+;
+; CHECK64-LABEL: foo:
+; CHECK64:       # BB#0:
+; CHECK64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
+; CHECK64-NEXT:    movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
+; CHECK64-NEXT:    movdqa %xmm0, %xmm2
+; CHECK64-NEXT:    pmullw %xmm1, %xmm0
+; CHECK64-NEXT:    pmulhw %xmm1, %xmm2
+; CHECK64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK64-NEXT:    retq
+;
+; SSE4-32-LABEL: foo:
+; SSE4-32:       # BB#0:
+; SSE4-32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT:    retl
+;
+; SSE4-64-LABEL: foo:
+; SSE4-64:       # BB#0:
+; SSE4-64-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT:    retq
+  %z = zext <4 x i8> %A to <4 x i32>
+  %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <4 x i32> %m
+}
+
+define <4 x i32> @foo_os(<4 x i8> %A) minsize {
+; CHECK32-LABEL: foo_os:
+; CHECK32:       # BB#0:
+; CHECK32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT:    retl
+;
+; CHECK64-LABEL: foo_os:
+; CHECK64:       # BB#0:
+; CHECK64-NEXT:    pand {{.*}}(%rip), %xmm0
+; CHECK64-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; CHECK64-NEXT:    retq
+;
+; SSE4-32-LABEL: foo_os:
+; SSE4-32:       # BB#0:
+; SSE4-32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT:    retl
+;
+; SSE4-64-LABEL: foo_os:
+; SSE4-64:       # BB#0:
+; SSE4-64-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT:    retq
+  %z = zext <4 x i8> %A to <4 x i32>
+  %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <4 x i32> %m
+}