Index: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -102,6 +102,11 @@
   case ISD::CONCAT_VECTORS:
                          Res = PromoteIntRes_CONCAT_VECTORS(N); break;
 
+  case ISD::ANY_EXTEND_VECTOR_INREG:
+  case ISD::SIGN_EXTEND_VECTOR_INREG:
+  case ISD::ZERO_EXTEND_VECTOR_INREG:
+                         Res = PromoteIntRes_EXTEND_VECTOR_INREG(N); break;
+
   case ISD::SIGN_EXTEND:
   case ISD::ZERO_EXTEND:
   case ISD::ANY_EXTEND:  Res = PromoteIntRes_INT_EXTEND(N); break;
@@ -3334,6 +3339,25 @@
   return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, Ops);
 }
 
+SDValue DAGTypeLegalizer::PromoteIntRes_EXTEND_VECTOR_INREG(SDNode *N) {
+  EVT VT = N->getValueType(0);
+  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+  assert(NVT.isVector() && "This type must be promoted to a vector type");
+
+  SDLoc dl(N);
+
+  // For operands whose TypeAction is to promote, use the promoted node to
+  // construct a new *_EXTEND_VECTOR_INREG node.
+  if (getTypeAction(N->getOperand(0).getValueType())
+      == TargetLowering::TypePromoteInteger) {
+    SDValue Promoted = GetPromotedInteger(N->getOperand(0));
+    return DAG.getNode(N->getOpcode(), dl, NVT, Promoted);
+  }
+
+  // Directly extend to the appropriate transform-to type.
+  return DAG.getNode(N->getOpcode(), dl, NVT, N->getOperand(0));
+}
+
 SDValue DAGTypeLegalizer::PromoteIntRes_INSERT_VECTOR_ELT(SDNode *N) {
   EVT OutVT = N->getValueType(0);
   EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
Index: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.h
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -242,6 +242,7 @@
   SDValue PromoteIntRes_VECTOR_SHUFFLE(SDNode *N);
   SDValue PromoteIntRes_BUILD_VECTOR(SDNode *N);
   SDValue PromoteIntRes_SCALAR_TO_VECTOR(SDNode *N);
+  SDValue PromoteIntRes_EXTEND_VECTOR_INREG(SDNode *N);
   SDValue PromoteIntRes_INSERT_VECTOR_ELT(SDNode *N);
   SDValue PromoteIntRes_CONCAT_VECTORS(SDNode *N);
   SDValue PromoteIntRes_BITCAST(SDNode *N);
Index: llvm/trunk/test/CodeGen/X86/promote-vec3.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/promote-vec3.ll
+++ llvm/trunk/test/CodeGen/X86/promote-vec3.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=SSE3
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX_ANY
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX_ANY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX_X86_64
+
+define <3 x i16> @zext_i8(<3 x i8>) {
+; SSE3-LABEL: zext_i8:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
+; SSE3-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; SSE3-NEXT:    pinsrw $1, %eax, %xmm0
+; SSE3-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; SSE3-NEXT:    pinsrw $2, %eax, %xmm0
+; SSE3-NEXT:    pxor %xmm1, %xmm1
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pextrw $2, %xmm0, %edx
+; SSE3-NEXT:    pextrw $4, %xmm0, %ecx
+; SSE3-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE3-NEXT:    # kill: %DX<def> %DX<kill> %EDX<kill>
+; SSE3-NEXT:    # kill: %CX<def> %CX<kill> %ECX<kill>
+; SSE3-NEXT:    retl
+;
+; SSE41-LABEL: zext_i8:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pinsrb $0, {{[0-9]+}}(%esp), %xmm0
+; SSE41-NEXT:    pinsrb $4, {{[0-9]+}}(%esp), %xmm0
+; SSE41-NEXT:    pinsrb $8, {{[0-9]+}}(%esp), %xmm0
+; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    pextrw $2, %xmm0, %edx
+; SSE41-NEXT:    pextrw $4, %xmm0, %ecx
+; SSE41-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE41-NEXT:    # kill: %DX<def> %DX<kill> %EDX<kill>
+; SSE41-NEXT:    # kill: %CX<def> %CX<kill> %ECX<kill>
+; SSE41-NEXT:    retl
+;
+; AVX_ANY-LABEL: zext_i8:
+; AVX_ANY:       # BB#0:
+; AVX_ANY-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX_ANY-NEXT:    vpinsrb $0, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX_ANY-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX_ANY-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX_ANY-NEXT:    vmovd %xmm0, %eax
+; AVX_ANY-NEXT:    vpextrw $2, %xmm0, %edx
+; AVX_ANY-NEXT:    vpextrw $4, %xmm0, %ecx
+; AVX_ANY-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX_ANY-NEXT:    # kill: %DX<def> %DX<kill> %EDX<kill>
+; AVX_ANY-NEXT:    # kill: %CX<def> %CX<kill> %ECX<kill>
+; AVX_ANY-NEXT:    retl
+;
+; AVX_X86_64-LABEL: zext_i8:
+; AVX_X86_64:       # BB#0:
+; AVX_X86_64-NEXT:    vmovd %edi, %xmm0
+; AVX_X86_64-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
+; AVX_X86_64-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
+; AVX_X86_64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX_X86_64-NEXT:    vmovd %xmm0, %eax
+; AVX_X86_64-NEXT:    vpextrw $2, %xmm0, %edx
+; AVX_X86_64-NEXT:    vpextrw $4, %xmm0, %ecx
+; AVX_X86_64-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX_X86_64-NEXT:    # kill: %DX<def> %DX<kill> %EDX<kill>
+; AVX_X86_64-NEXT:    # kill: %CX<def> %CX<kill> %ECX<kill>
+; AVX_X86_64-NEXT:    retq
+  %2 = zext <3 x i8> %0 to <3 x i16>
+  ret <3 x i16> %2
+}
+
+define <3 x i16> @sext_i8(<3 x i8>) {
+; SSE3-LABEL: sext_i8:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
+; SSE3-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; SSE3-NEXT:    pinsrw $1, %eax, %xmm0
+; SSE3-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; SSE3-NEXT:    pinsrw $2, %eax, %xmm0
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE3-NEXT:    psrad $16, %xmm0
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pextrw $2, %xmm0, %edx
+; SSE3-NEXT:    pextrw $4, %xmm0, %ecx
+; SSE3-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE3-NEXT:    # kill: %DX<def> %DX<kill> %EDX<kill>
+; SSE3-NEXT:    # kill: %CX<def> %CX<kill> %ECX<kill>
+; SSE3-NEXT:    retl
+;
+; SSE41-LABEL: sext_i8:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    pinsrb $0, {{[0-9]+}}(%esp), %xmm0
+; SSE41-NEXT:    pinsrb $4, {{[0-9]+}}(%esp), %xmm0
+; SSE41-NEXT:    pinsrb $8, {{[0-9]+}}(%esp), %xmm0
+; SSE41-NEXT:    pslld $24, %xmm0
+; SSE41-NEXT:    psrad $24, %xmm0
+; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    pextrw $2, %xmm0, %edx
+; SSE41-NEXT:    pextrw $4, %xmm0, %ecx
+; SSE41-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE41-NEXT:    # kill: %DX<def> %DX<kill> %EDX<kill>
+; SSE41-NEXT:    # kill: %CX<def> %CX<kill> %ECX<kill>
+; SSE41-NEXT:    retl
+;
+; AVX_ANY-LABEL: sext_i8:
+; AVX_ANY:       # BB#0:
+; AVX_ANY-NEXT:    vpinsrb $0, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX_ANY-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX_ANY-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX_ANY-NEXT:    vpslld $24, %xmm0, %xmm0
+; AVX_ANY-NEXT:    vpsrad $24, %xmm0, %xmm0
+; AVX_ANY-NEXT:    vmovd %xmm0, %eax
+; AVX_ANY-NEXT:    vpextrw $2, %xmm0, %edx
+; AVX_ANY-NEXT:    vpextrw $4, %xmm0, %ecx
+; AVX_ANY-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX_ANY-NEXT:    # kill: %DX<def> %DX<kill> %EDX<kill>
+; AVX_ANY-NEXT:    # kill: %CX<def> %CX<kill> %ECX<kill>
+; AVX_ANY-NEXT:    retl
+;
+; AVX_X86_64-LABEL: sext_i8:
+; AVX_X86_64:       # BB#0:
+; AVX_X86_64-NEXT:    vmovd %edi, %xmm0
+; AVX_X86_64-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
+; AVX_X86_64-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
+; AVX_X86_64-NEXT:    vpslld $24, %xmm0, %xmm0
+; AVX_X86_64-NEXT:    vpsrad $24, %xmm0, %xmm0
+; AVX_X86_64-NEXT:    vmovd %xmm0, %eax
+; AVX_X86_64-NEXT:    vpextrw $2, %xmm0, %edx
+; AVX_X86_64-NEXT:    vpextrw $4, %xmm0, %ecx
+; AVX_X86_64-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX_X86_64-NEXT:    # kill: %DX<def> %DX<kill> %EDX<kill>
+; AVX_X86_64-NEXT:    # kill: %CX<def> %CX<kill> %ECX<kill>
+; AVX_X86_64-NEXT:    retq
+  %2 = sext <3 x i8> %0 to <3 x i16>
+  ret <3 x i16> %2
+}