Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -24182,6 +24182,59 @@
   return SDValue();
 }
 
+static SDValue PerformSRACombine(SDNode *N, SelectionDAG &DAG) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  EVT VT = N0.getValueType();
+  unsigned Size = VT.getSizeInBits();
+
+  // fold (ashr (shl a, [56,48,32,24,16]), SarConst)
+  // into (shl (sext a), [56,48,32,24,16] - SarConst) or
+  // into (sra (sext a), SarConst - [56,48,32,24,16]),
+  // depending on the sign of (SarConst - [56,48,32,24,16]).
+
+  // sexts in X86 are MOVs. The MOVs have the same code size
+  // as the SHIFTs above (only a SHIFT by 1 has a shorter encoding).
+  // However, the MOVs have two advantages over a SHIFT:
+  // 1. MOVs can write to a register that differs from the source.
+  // 2. MOVs accept memory operands.
+
+  if (!VT.isInteger() || VT.isVector() || N1.getOpcode() != ISD::Constant ||
+      N0.getOpcode() != ISD::SHL ||
+      N0.getOperand(1).getOpcode() != ISD::Constant)
+    return SDValue();
+
+  SDValue N00 = N0.getOperand(0);
+  SDValue N01 = N0.getOperand(1);
+  APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
+  APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
+  EVT CVT = N1.getValueType();
+
+  if (SarConst.isNegative())
+    return SDValue();
+
+  for (MVT SVT : MVT::integer_valuetypes()) {
+    unsigned ShiftSize = SVT.getSizeInBits();
+    // Skip types without a corresponding sext/zext and ShlConst values
+    // that are not one of [56,48,32,24,16].
+    if (ShiftSize < 8 || ShiftSize > 64 || ShlConst != Size - ShiftSize)
+      continue;
+    SDLoc DL(N);
+    SDValue NN =
+        DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
+    SarConst = SarConst - (Size - ShiftSize);
+    if (SarConst == 0)
+      return NN;
+    else if (SarConst.isNegative())
+      return DAG.getNode(ISD::SHL, DL, VT, NN,
+                         DAG.getConstant(-SarConst, DL, CVT));
+    else
+      return DAG.getNode(ISD::SRA, DL, VT, NN,
+                         DAG.getConstant(SarConst, DL, CVT));
+  }
+  return SDValue();
+}
+
 /// \brief Returns a vector of 0s if the node in input is a vector logical
 /// shift by a constant amount which is known to be bigger than or equal
 /// to the vector element size in bits.
@@ -24220,6 +24273,10 @@
   if (SDValue V = PerformSHLCombine(N, DAG))
     return V;
 
+  if (N->getOpcode() == ISD::SRA)
+    if (SDValue V = PerformSRACombine(N, DAG))
+      return V;
+
   // Try to fold this logical shift into a zero vector.
   if (N->getOpcode() != ISD::SRA)
     if (SDValue V = performShiftToAllZeros(N, DAG, Subtarget))
Index: test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
===================================================================
--- test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
+++ test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
@@ -4,15 +4,23 @@
 ; a shr (X, -8) that gets subsequently "optimized away" as undef
 ; PR4254
+; After fixing PR24373, the sequence
+;   shlq $56, %rdi
+;   sarq $48, %rdi
+; now folds into
+;   movsbq %dil, %rax
+;   shlq $8, %rax
+; which is better for x86.
+
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "x86_64-unknown-linux-gnu"
 
 define i64 @foo(i64 %b) nounwind readnone {
 entry:
 ; CHECK-LABEL: foo:
-; CHECK: shlq $56, %rdi
-; CHECK: sarq $48, %rdi
-; CHECK: leaq 1(%rdi), %rax
+; CHECK: movsbq %dil, %rax
+; CHECK: shlq $8, %rax
+; CHECK: orq $1, %rax
   %shl = shl i64 %b, 56		; <i64> [#uses=1]
   %shr = ashr i64 %shl, 48		; <i64> [#uses=1]
   %add5 = or i64 %shr, 1		; <i64> [#uses=1]
Index: test/CodeGen/X86/sar_fold.ll
===================================================================
--- test/CodeGen/X86/sar_fold.ll
+++ test/CodeGen/X86/sar_fold.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -O2 -march=x86 | FileCheck %s
+
+define i32 @shl16sar15(i32 %a) #0 {
+; CHECK-LABEL: shl16sar15:
+; CHECK: # BB#0:
+; CHECK-NEXT: movswl {{[0-9]+}}(%esp), %eax
+  %1 = shl i32 %a, 16
+  %2 = ashr exact i32 %1, 15
+  ret i32 %2
+}
+
+define i32 @shl16sar17(i32 %a) #0 {
+; CHECK-LABEL: shl16sar17:
+; CHECK: # BB#0:
+; CHECK-NEXT: movswl {{[0-9]+}}(%esp), %eax
+  %1 = shl i32 %a, 16
+  %2 = ashr exact i32 %1, 17
+  ret i32 %2
+}
+
+define i32 @shl24sar23(i32 %a) #0 {
+; CHECK-LABEL: shl24sar23:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsbl {{[0-9]+}}(%esp), %eax
+  %1 = shl i32 %a, 24
+  %2 = ashr exact i32 %1, 23
+  ret i32 %2
+}
+
+define i32 @shl24sar25(i32 %a) #0 {
+; CHECK-LABEL: shl24sar25:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsbl {{[0-9]+}}(%esp), %eax
+  %1 = shl i32 %a, 24
+  %2 = ashr exact i32 %1, 25
+  ret i32 %2
+}
Index: test/CodeGen/X86/sar_fold64.ll
===================================================================
--- test/CodeGen/X86/sar_fold64.ll
+++ test/CodeGen/X86/sar_fold64.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -O2 -march=x86-64 | FileCheck %s
+
+define i32 @shl48sar47(i64 %a) #0 {
+; CHECK-LABEL: shl48sar47:
+; CHECK: # BB#0:
+; CHECK-NEXT: movswq %{{[cd][xi]}}, %rax
+  %1 = shl i64 %a, 48
+  %2 = ashr exact i64 %1, 47
+  %3 = trunc i64 %2 to i32
+  ret i32 %3
+}
+
+define i32 @shl48sar49(i64 %a) #0 {
+; CHECK-LABEL: shl48sar49:
+; CHECK: # BB#0:
+; CHECK-NEXT: movswq %{{[cd][xi]}}, %rax
+  %1 = shl i64 %a, 48
+  %2 = ashr exact i64 %1, 49
+  %3 = trunc i64 %2 to i32
+  ret i32 %3
+}
+
+define i32 @shl56sar55(i64 %a) #0 {
+; CHECK-LABEL: shl56sar55:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsbq %{{[cdi]*l}}, %rax
+  %1 = shl i64 %a, 56
+  %2 = ashr exact i64 %1, 55
+  %3 = trunc i64 %2 to i32
+  ret i32 %3
+}
+
+define i32 @shl56sar57(i64 %a) #0 {
+; CHECK-LABEL: shl56sar57:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsbq %{{[cdi]*l}}, %rax
+  %1 = shl i64 %a, 56
+  %2 = ashr exact i64 %1, 57
+  %3 = trunc i64 %2 to i32
+  ret i32 %3
+}
+
+attributes #0 = { nounwind }
Index: test/CodeGen/X86/vector-sext.ll
===================================================================
--- test/CodeGen/X86/vector-sext.ll
+++ test/CodeGen/X86/vector-sext.ll
@@ -1068,10 +1068,9 @@
 define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
 ; SSE2-LABEL: load_sext_8i1_to_8i16:
 ; SSE2:      
# BB#0: # %entry -; SSE2-NEXT: movzbl (%rdi), %eax +; SSE2-NEXT: movsbq (%rdi), %rax ; SSE2-NEXT: movq %rax, %rcx -; SSE2-NEXT: shlq $56, %rcx -; SSE2-NEXT: sarq $63, %rcx +; SSE2-NEXT: shrq $7, %rcx ; SSE2-NEXT: movd %ecx, %xmm0 ; SSE2-NEXT: movq %rax, %rcx ; SSE2-NEXT: shlq $60, %rcx @@ -1111,10 +1110,9 @@ ; ; SSSE3-LABEL: load_sext_8i1_to_8i16: ; SSSE3: # BB#0: # %entry -; SSSE3-NEXT: movzbl (%rdi), %eax +; SSSE3-NEXT: movsbq (%rdi), %rax ; SSSE3-NEXT: movq %rax, %rcx -; SSSE3-NEXT: shlq $56, %rcx -; SSSE3-NEXT: sarq $63, %rcx +; SSSE3-NEXT: shrq $7, %rcx ; SSSE3-NEXT: movd %ecx, %xmm0 ; SSSE3-NEXT: movq %rax, %rcx ; SSSE3-NEXT: shlq $60, %rcx @@ -1154,7 +1152,7 @@ ; ; SSE41-LABEL: load_sext_8i1_to_8i16: ; SSE41: # BB#0: # %entry -; SSE41-NEXT: movzbl (%rdi), %eax +; SSE41-NEXT: movsbq (%rdi), %rax ; SSE41-NEXT: movq %rax, %rcx ; SSE41-NEXT: shlq $62, %rcx ; SSE41-NEXT: sarq $63, %rcx @@ -1183,14 +1181,13 @@ ; SSE41-NEXT: shlq $57, %rcx ; SSE41-NEXT: sarq $63, %rcx ; SSE41-NEXT: pinsrw $6, %ecx, %xmm0 -; SSE41-NEXT: shlq $56, %rax -; SSE41-NEXT: sarq $63, %rax +; SSE41-NEXT: shrq $7, %rax ; SSE41-NEXT: pinsrw $7, %eax, %xmm0 ; SSE41-NEXT: retq ; ; AVX-LABEL: load_sext_8i1_to_8i16: ; AVX: # BB#0: # %entry -; AVX-NEXT: movzbl (%rdi), %eax +; AVX-NEXT: movsbq (%rdi), %rax ; AVX-NEXT: movq %rax, %rcx ; AVX-NEXT: shlq $62, %rcx ; AVX-NEXT: sarq $63, %rcx @@ -1219,15 +1216,14 @@ ; AVX-NEXT: shlq $57, %rcx ; AVX-NEXT: sarq $63, %rcx ; AVX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 -; AVX-NEXT: shlq $56, %rax -; AVX-NEXT: sarq $63, %rax +; AVX-NEXT: shrq $7, %rax ; AVX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; X32-SSE41-LABEL: load_sext_8i1_to_8i16: ; X32-SSE41: # BB#0: # %entry ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-SSE41-NEXT: movl (%eax), %eax +; X32-SSE41-NEXT: movsbl (%eax), %eax ; X32-SSE41-NEXT: movl %eax, %ecx ; X32-SSE41-NEXT: shll $30, %ecx ; X32-SSE41-NEXT: sarl $31, %ecx @@ -1256,8 +1252,7 @@ ; X32-SSE41-NEXT: shll $25, %ecx ; X32-SSE41-NEXT: sarl $31, %ecx ; X32-SSE41-NEXT: pinsrw $6, %ecx, %xmm0 -; X32-SSE41-NEXT: shll $24, %eax -; X32-SSE41-NEXT: sarl $31, %eax +; X32-SSE41-NEXT: shrl $7, %eax ; X32-SSE41-NEXT: pinsrw $7, %eax, %xmm0 ; X32-SSE41-NEXT: retl entry: @@ -1444,7 +1439,7 @@ ; ; AVX1-LABEL: load_sext_8i1_to_8i32: ; AVX1: # BB#0: # %entry -; AVX1-NEXT: movzbl (%rdi), %eax +; AVX1-NEXT: movsbq (%rdi), %rax ; AVX1-NEXT: movq %rax, %rcx ; AVX1-NEXT: shlq $58, %rcx ; AVX1-NEXT: sarq $63, %rcx @@ -1458,8 +1453,7 @@ ; AVX1-NEXT: sarq $63, %rcx ; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 ; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $56, %rcx -; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $7, %rcx ; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 ; AVX1-NEXT: movq %rax, %rcx ; AVX1-NEXT: shlq $62, %rcx @@ -1481,7 +1475,7 @@ ; ; AVX2-LABEL: load_sext_8i1_to_8i32: ; AVX2: # BB#0: # %entry -; AVX2-NEXT: movzbl (%rdi), %eax +; AVX2-NEXT: movsbq (%rdi), %rax ; AVX2-NEXT: movq %rax, %rcx ; AVX2-NEXT: shlq $58, %rcx ; AVX2-NEXT: sarq $63, %rcx @@ -1495,8 +1489,7 @@ ; AVX2-NEXT: sarq $63, %rcx ; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 ; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $56, %rcx -; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: shrq $7, %rcx ; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 ; AVX2-NEXT: movq %rax, %rcx ; AVX2-NEXT: shlq $62, %rcx @@ -1873,140 +1866,208 @@ ; ; AVX1-LABEL: load_sext_16i1_to_16i16: ; AVX1: # BB#0: # %entry -; AVX1-NEXT: movzwl (%rdi), %eax +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: .Ltmp0: +; AVX1-NEXT: .cfi_def_cfa_offset 16 +; 
AVX1-NEXT: pushq %r15 +; AVX1-NEXT: .Ltmp1: +; AVX1-NEXT: .cfi_def_cfa_offset 24 +; AVX1-NEXT: pushq %r14 +; AVX1-NEXT: .Ltmp2: +; AVX1-NEXT: .cfi_def_cfa_offset 32 +; AVX1-NEXT: pushq %r13 +; AVX1-NEXT: .Ltmp3: +; AVX1-NEXT: .cfi_def_cfa_offset 40 +; AVX1-NEXT: pushq %r12 +; AVX1-NEXT: .Ltmp4: +; AVX1-NEXT: .cfi_def_cfa_offset 48 +; AVX1-NEXT: pushq %rbx +; AVX1-NEXT: .Ltmp5: +; AVX1-NEXT: .cfi_def_cfa_offset 56 +; AVX1-NEXT: .Ltmp6: +; AVX1-NEXT: .cfi_offset %rbx, -56 +; AVX1-NEXT: .Ltmp7: +; AVX1-NEXT: .cfi_offset %r12, -48 +; AVX1-NEXT: .Ltmp8: +; AVX1-NEXT: .cfi_offset %r13, -40 +; AVX1-NEXT: .Ltmp9: +; AVX1-NEXT: .cfi_offset %r14, -32 +; AVX1-NEXT: .Ltmp10: +; AVX1-NEXT: .cfi_offset %r15, -24 +; AVX1-NEXT: .Ltmp11: +; AVX1-NEXT: .cfi_offset %rbp, -16 +; AVX1-NEXT: movswq (%rdi), %rax ; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $54, %rcx +; AVX1-NEXT: shlq $55, %rcx ; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: movq %rax, %rdx -; AVX1-NEXT: shlq $55, %rdx -; AVX1-NEXT: sarq $63, %rdx -; AVX1-NEXT: vmovd %edx, %xmm0 -; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: vmovd %ecx, %xmm0 +; AVX1-NEXT: movq %rax, %r8 +; AVX1-NEXT: movq %rax, %r10 +; AVX1-NEXT: movq %rax, %r11 +; AVX1-NEXT: movq %rax, %r14 +; AVX1-NEXT: movq %rax, %r15 +; AVX1-NEXT: movq %rax, %r9 +; AVX1-NEXT: movq %rax, %r12 +; AVX1-NEXT: movq %rax, %r13 +; AVX1-NEXT: movq %rax, %rbx +; AVX1-NEXT: movq %rax, %rdi ; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $53, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $52, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $51, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $50, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $49, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $48, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $62, %rcx -; AVX1-NEXT: sarq $63, %rcx ; AVX1-NEXT: movq %rax, %rdx -; AVX1-NEXT: shlq $63, %rdx -; AVX1-NEXT: sarq $63, %rdx -; AVX1-NEXT: vmovd %edx, %xmm1 -; AVX1-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $61, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $60, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: movq %rax, %rsi +; AVX1-NEXT: movsbq %al, %rbp +; AVX1-NEXT: shlq $54, %rax +; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: shlq $53, %r8 +; AVX1-NEXT: sarq $63, %r8 +; AVX1-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $52, %r10 +; AVX1-NEXT: sarq $63, %r10 +; AVX1-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $51, %r11 +; AVX1-NEXT: sarq $63, %r11 +; AVX1-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $50, %r14 +; AVX1-NEXT: sarq $63, %r14 +; AVX1-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $49, %r15 +; AVX1-NEXT: sarq $63, %r15 +; AVX1-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0 +; AVX1-NEXT: shrq $15, %r9 +; AVX1-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $63, %r13 +; AVX1-NEXT: 
sarq $63, %r13 +; AVX1-NEXT: vmovd %r13d, %xmm1 +; AVX1-NEXT: shlq $62, %r12 +; AVX1-NEXT: sarq $63, %r12 +; AVX1-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $61, %rbx +; AVX1-NEXT: sarq $63, %rbx +; AVX1-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $60, %rdi +; AVX1-NEXT: sarq $63, %rdi +; AVX1-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1 ; AVX1-NEXT: shlq $59, %rcx ; AVX1-NEXT: sarq $63, %rcx ; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $58, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $57, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: shlq $56, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1 +; AVX1-NEXT: shlq $58, %rdx +; AVX1-NEXT: sarq $63, %rdx +; AVX1-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $57, %rsi +; AVX1-NEXT: sarq $63, %rsi +; AVX1-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1 +; AVX1-NEXT: shrq $7, %rbp +; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: popq %rbx +; AVX1-NEXT: popq %r12 +; AVX1-NEXT: popq %r13 +; AVX1-NEXT: popq %r14 +; AVX1-NEXT: popq %r15 +; AVX1-NEXT: popq %rbp ; AVX1-NEXT: retq ; ; AVX2-LABEL: load_sext_16i1_to_16i16: ; AVX2: # BB#0: # %entry -; AVX2-NEXT: movzwl (%rdi), %eax +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .Ltmp0: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: .Ltmp1: +; AVX2-NEXT: .cfi_def_cfa_offset 24 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: .Ltmp2: +; AVX2-NEXT: .cfi_def_cfa_offset 32 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: .Ltmp3: +; AVX2-NEXT: .cfi_def_cfa_offset 40 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: .Ltmp4: +; AVX2-NEXT: .cfi_def_cfa_offset 48 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: .Ltmp5: +; AVX2-NEXT: .cfi_def_cfa_offset 56 +; AVX2-NEXT: .Ltmp6: +; AVX2-NEXT: .cfi_offset %rbx, -56 +; AVX2-NEXT: .Ltmp7: +; AVX2-NEXT: .cfi_offset %r12, -48 +; AVX2-NEXT: .Ltmp8: +; AVX2-NEXT: .cfi_offset %r13, -40 +; AVX2-NEXT: .Ltmp9: +; AVX2-NEXT: .cfi_offset %r14, -32 +; AVX2-NEXT: .Ltmp10: +; AVX2-NEXT: .cfi_offset %r15, -24 +; AVX2-NEXT: .Ltmp11: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movswq (%rdi), %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shlq $55, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vmovd %ecx, %xmm0 +; AVX2-NEXT: movq %rax, %r8 +; AVX2-NEXT: movq %rax, %r10 +; AVX2-NEXT: movq %rax, %r11 +; AVX2-NEXT: movq %rax, %r14 +; AVX2-NEXT: movq %rax, %r15 +; AVX2-NEXT: movq %rax, %r9 +; AVX2-NEXT: movq %rax, %r12 +; AVX2-NEXT: movq %rax, %r13 +; AVX2-NEXT: movq %rax, %rbx +; AVX2-NEXT: movq %rax, %rdi ; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $54, %rcx -; AVX2-NEXT: sarq $63, %rcx ; AVX2-NEXT: movq %rax, %rdx -; AVX2-NEXT: shlq $55, %rdx -; AVX2-NEXT: sarq $63, %rdx -; AVX2-NEXT: vmovd %edx, %xmm0 -; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $53, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $52, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $51, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $50, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq 
$49, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $48, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $62, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: movq %rax, %rdx -; AVX2-NEXT: shlq $63, %rdx -; AVX2-NEXT: sarq $63, %rdx -; AVX2-NEXT: vmovd %edx, %xmm1 -; AVX2-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $61, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $60, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: movq %rax, %rsi +; AVX2-NEXT: movsbq %al, %rbp +; AVX2-NEXT: shlq $54, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: shlq $53, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $52, %r10 +; AVX2-NEXT: sarq $63, %r10 +; AVX2-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $51, %r11 +; AVX2-NEXT: sarq $63, %r11 +; AVX2-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $50, %r14 +; AVX2-NEXT: sarq $63, %r14 +; AVX2-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $49, %r15 +; AVX2-NEXT: sarq $63, %r15 +; AVX2-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0 +; AVX2-NEXT: shrq $15, %r9 +; AVX2-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $63, %r13 +; AVX2-NEXT: sarq $63, %r13 +; AVX2-NEXT: vmovd %r13d, %xmm1 +; AVX2-NEXT: shlq $62, %r12 +; AVX2-NEXT: sarq $63, %r12 +; AVX2-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $61, %rbx +; AVX2-NEXT: sarq $63, %rbx +; AVX2-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $60, %rdi +; AVX2-NEXT: sarq $63, %rdi +; AVX2-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1 ; AVX2-NEXT: shlq $59, %rcx ; AVX2-NEXT: sarq $63, %rcx ; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $58, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $57, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: shlq $56, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1 +; AVX2-NEXT: shlq $58, %rdx +; AVX2-NEXT: sarq $63, %rdx +; AVX2-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $57, %rsi +; AVX2-NEXT: sarq $63, %rsi +; AVX2-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1 +; AVX2-NEXT: shrq $7, %rbp +; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp ; AVX2-NEXT: retq ; ; X32-SSE41-LABEL: load_sext_16i1_to_16i16: