diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6985,11 +6985,13 @@
   EVT VT = LHS.getValueType();
 
   // The target must have at least one rotate/funnel flavor.
+  // We still try to match rotate by constant pre-legalization.
+  // TODO: Support pre-legalization funnel-shift by constant.
   bool HasROTL = hasOperation(ISD::ROTL, VT);
   bool HasROTR = hasOperation(ISD::ROTR, VT);
   bool HasFSHL = hasOperation(ISD::FSHL, VT);
   bool HasFSHR = hasOperation(ISD::FSHR, VT);
-  if (!HasROTL && !HasROTR && !HasFSHL && !HasFSHR)
+  if (LegalOperations && !HasROTL && !HasROTR && !HasFSHL && !HasFSHR)
     return SDValue();
 
   // Check for truncated rotate.
@@ -7043,6 +7045,8 @@
     return SDValue(); // Shifts must disagree.
 
   bool IsRotate = LHSShift.getOperand(0) == RHSShift.getOperand(0);
+
+  // TODO: Support pre-legalization funnel-shift by constant.
   if (!IsRotate && !(HasFSHL || HasFSHR))
     return SDValue(); // Requires funnel shift support.
 
@@ -7070,12 +7074,15 @@
   };
   if (ISD::matchBinaryPredicate(LHSShiftAmt, RHSShiftAmt, MatchRotateSum)) {
     SDValue Res;
-    if (IsRotate && (HasROTL || HasROTR))
-      Res = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, LHSShiftArg,
-                        HasROTL ? LHSShiftAmt : RHSShiftAmt);
-    else
-      Res = DAG.getNode(HasFSHL ? ISD::FSHL : ISD::FSHR, DL, VT, LHSShiftArg,
-                        RHSShiftArg, HasFSHL ? LHSShiftAmt : RHSShiftAmt);
+    if (IsRotate && (HasROTL || HasROTR || !(HasFSHL || HasFSHR))) {
+      bool UseROTL = !LegalOperations || HasROTL;
+      Res = DAG.getNode(UseROTL ? ISD::ROTL : ISD::ROTR, DL, VT, LHSShiftArg,
+                        UseROTL ? LHSShiftAmt : RHSShiftAmt);
+    } else {
+      bool UseFSHL = !LegalOperations || HasFSHL;
+      Res = DAG.getNode(UseFSHL ? ISD::FSHL : ISD::FSHR, DL, VT, LHSShiftArg,
+                        RHSShiftArg, UseFSHL ? LHSShiftAmt : RHSShiftAmt);
+    }
 
     // If there is an AND of either shifted operand, apply it to the result.
     if (LHSMask.getNode() || RHSMask.getNode()) {
@@ -7099,6 +7106,11 @@
     return Res;
   }
 
+  // Even pre-legalization, we can't easily rotate/funnel-shift by a variable
+  // shift.
+  if (!HasROTL && !HasROTR && !HasFSHL && !HasFSHR)
+    return SDValue();
+
   // If there is a mask here, and we have a variable shift, we can't be sure
   // that we're masking out the right stuff.
if (LHSMask.getNode() || RHSMask.getNode()) diff --git a/llvm/test/CodeGen/ARM/rotate.ll b/llvm/test/CodeGen/ARM/rotate.ll --- a/llvm/test/CodeGen/ARM/rotate.ll +++ b/llvm/test/CodeGen/ARM/rotate.ll @@ -7,8 +7,8 @@ ; CHECK-LABEL: testcase: ; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] -; CHECK-NEXT: vshl.i64 q9, q8, #56 -; CHECK-NEXT: vshr.u64 q8, q8, #8 +; CHECK-NEXT: vshr.u64 q9, q8, #8 +; CHECK-NEXT: vshl.i64 q8, q8, #56 ; CHECK-NEXT: vorr q0, q8, q9 ; CHECK-NEXT: bx lr %1 = load <2 x i64>, <2 x i64>* %in diff --git a/llvm/test/CodeGen/PowerPC/vector-rotates.ll b/llvm/test/CodeGen/PowerPC/vector-rotates.ll --- a/llvm/test/CodeGen/PowerPC/vector-rotates.ll +++ b/llvm/test/CodeGen/PowerPC/vector-rotates.ll @@ -110,23 +110,16 @@ ; ; CHECK-P7-LABEL: rotl_v2i64: ; CHECK-P7: # %bb.0: # %entry -; CHECK-P7-NEXT: addi r3, r1, -48 -; CHECK-P7-NEXT: stxvd2x vs34, 0, r3 -; CHECK-P7-NEXT: ld r3, -40(r1) -; CHECK-P7-NEXT: sldi r4, r3, 53 -; CHECK-P7-NEXT: rldicl r3, r3, 53, 11 -; CHECK-P7-NEXT: std r4, -8(r1) -; CHECK-P7-NEXT: ld r4, -48(r1) -; CHECK-P7-NEXT: sldi r5, r4, 41 -; CHECK-P7-NEXT: rldicl r4, r4, 41, 23 -; CHECK-P7-NEXT: std r5, -16(r1) -; CHECK-P7-NEXT: addi r5, r1, -16 -; CHECK-P7-NEXT: lxvw4x vs0, 0, r5 -; CHECK-P7-NEXT: std r3, -24(r1) ; CHECK-P7-NEXT: addi r3, r1, -32 -; CHECK-P7-NEXT: std r4, -32(r1) -; CHECK-P7-NEXT: lxvw4x vs1, 0, r3 -; CHECK-P7-NEXT: xxlor vs34, vs0, vs1 +; CHECK-P7-NEXT: stxvd2x vs34, 0, r3 +; CHECK-P7-NEXT: ld r3, -24(r1) +; CHECK-P7-NEXT: rotldi r3, r3, 53 +; CHECK-P7-NEXT: std r3, -8(r1) +; CHECK-P7-NEXT: ld r3, -32(r1) +; CHECK-P7-NEXT: rotldi r3, r3, 41 +; CHECK-P7-NEXT: std r3, -16(r1) +; CHECK-P7-NEXT: addi r3, r1, -16 +; CHECK-P7-NEXT: lxvd2x vs34, 0, r3 ; CHECK-P7-NEXT: blr entry: %b = shl <2 x i64> %a, diff --git a/llvm/test/CodeGen/RISCV/rv32zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbp.ll --- a/llvm/test/CodeGen/RISCV/rv32zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbp.ll @@ -1642,9 +1642,9 @@ define i32 @grev16_i32(i32 %a) nounwind { ; RV32I-LABEL: grev16_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a1, a0, 16 -; RV32I-NEXT: srli a0, a0, 16 -; RV32I-NEXT: or a0, a1, a0 +; RV32I-NEXT: srli a1, a0, 16 +; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: ret ; ; RV32ZBP-LABEL: grev16_i32: @@ -2045,12 +2045,12 @@ define i64 @grev16_i64(i64 %a) nounwind { ; RV32I-LABEL: grev16_i64: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a2, a1, 16 -; RV32I-NEXT: srli a3, a0, 16 +; RV32I-NEXT: srli a2, a0, 16 ; RV32I-NEXT: slli a0, a0, 16 -; RV32I-NEXT: or a0, a0, a3 -; RV32I-NEXT: srli a1, a1, 16 -; RV32I-NEXT: or a1, a2, a1 +; RV32I-NEXT: or a0, a0, a2 +; RV32I-NEXT: srli a2, a1, 16 +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: or a1, a1, a2 ; RV32I-NEXT: ret ; ; RV32ZBP-LABEL: grev16_i64: diff --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll --- a/llvm/test/CodeGen/RISCV/rv64zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll @@ -1853,14 +1853,14 @@ define signext i32 @grev16_i32(i32 signext %a) nounwind { ; RV64I-LABEL: grev16_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a0, 16 -; RV64I-NEXT: srliw a0, a0, 16 -; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: srliw a1, a0, 16 +; RV64I-NEXT: slliw a0, a0, 16 +; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: grev16_i32: ; RV64ZBP: # %bb.0: -; RV64ZBP-NEXT: greviw a0, a0, 16 +; RV64ZBP-NEXT: roriw a0, a0, 16 ; RV64ZBP-NEXT: ret %shl = shl i32 %a, 16 %shr = lshr i32 %a, 16 @@ -1935,9 +1935,9 @@ define i64 @grev32(i64 %a) nounwind { ; RV64I-LABEL: grev32: ; RV64I: 
# %bb.0: -; RV64I-NEXT: slli a1, a0, 32 -; RV64I-NEXT: srli a0, a0, 32 -; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: srli a1, a0, 32 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: grev32: diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -367,19 +367,17 @@ ; RV32-NEXT: neg a4, a2 ; RV32-NEXT: neg a5, a0 ; RV32-NEXT: sw a5, 0(s0) +; RV32-NEXT: andi a4, a4, 7 +; RV32-NEXT: sb a4, 12(s0) ; RV32-NEXT: slli a3, a3, 1 ; RV32-NEXT: sub a0, a0, a3 ; RV32-NEXT: sw a0, 4(s0) -; RV32-NEXT: slli a0, a2, 2 -; RV32-NEXT: srli a2, a4, 30 -; RV32-NEXT: sub a2, a2, a0 -; RV32-NEXT: andi a2, a2, 7 -; RV32-NEXT: sb a2, 12(s0) -; RV32-NEXT: srli a2, a1, 31 +; RV32-NEXT: srli a0, a1, 31 ; RV32-NEXT: andi a1, a1, 1 ; RV32-NEXT: slli a1, a1, 1 -; RV32-NEXT: or a1, a2, a1 -; RV32-NEXT: sub a0, a1, a0 +; RV32-NEXT: or a0, a0, a1 +; RV32-NEXT: slli a1, a2, 2 +; RV32-NEXT: sub a0, a0, a1 ; RV32-NEXT: sw a0, 8(s0) ; RV32-NEXT: lw s6, 0(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s5, 4(sp) # 4-byte Folded Reload @@ -541,19 +539,17 @@ ; RV32M-NEXT: neg a4, a2 ; RV32M-NEXT: neg a5, a0 ; RV32M-NEXT: sw a5, 0(s0) +; RV32M-NEXT: andi a4, a4, 7 +; RV32M-NEXT: sb a4, 12(s0) ; RV32M-NEXT: slli a3, a3, 1 ; RV32M-NEXT: sub a0, a0, a3 ; RV32M-NEXT: sw a0, 4(s0) -; RV32M-NEXT: slli a0, a2, 2 -; RV32M-NEXT: srli a2, a4, 30 -; RV32M-NEXT: sub a2, a2, a0 -; RV32M-NEXT: andi a2, a2, 7 -; RV32M-NEXT: sb a2, 12(s0) -; RV32M-NEXT: srli a2, a1, 31 +; RV32M-NEXT: srli a0, a1, 31 ; RV32M-NEXT: andi a1, a1, 1 ; RV32M-NEXT: slli a1, a1, 1 -; RV32M-NEXT: or a1, a2, a1 -; RV32M-NEXT: sub a0, a1, a0 +; RV32M-NEXT: or a0, a0, a1 +; RV32M-NEXT: slli a1, a2, 2 +; RV32M-NEXT: sub a0, a0, a1 ; RV32M-NEXT: sw a0, 8(s0) ; RV32M-NEXT: lw s6, 0(sp) # 4-byte Folded Reload ; RV32M-NEXT: lw s5, 4(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/X86/rotate-extract-vector.ll b/llvm/test/CodeGen/X86/rotate-extract-vector.ll --- a/llvm/test/CodeGen/X86/rotate-extract-vector.ll +++ b/llvm/test/CodeGen/X86/rotate-extract-vector.ll @@ -132,18 +132,18 @@ define <32 x i16> @illegal_no_extract_mul(<32 x i16> %i) nounwind { ; X86-LABEL: illegal_no_extract_mul: ; X86: # %bb.0: -; X86-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm1 ; X86-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 -; X86-NEXT: vpsrlw $10, %zmm0, %zmm0 -; X86-NEXT: vporq %zmm0, %zmm1, %zmm0 +; X86-NEXT: vpsrlw $10, %zmm0, %zmm1 +; X86-NEXT: vpsllw $6, %zmm0, %zmm0 +; X86-NEXT: vporq %zmm1, %zmm0, %zmm0 ; X86-NEXT: retl ; ; X64-LABEL: illegal_no_extract_mul: ; X64: # %bb.0: -; X64-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 ; X64-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; X64-NEXT: vpsrlw $10, %zmm0, %zmm0 -; X64-NEXT: vporq %zmm0, %zmm1, %zmm0 +; X64-NEXT: vpsrlw $10, %zmm0, %zmm1 +; X64-NEXT: vpsllw $6, %zmm0, %zmm0 +; X64-NEXT: vporq %zmm1, %zmm0, %zmm0 ; X64-NEXT: retq %lhs_mul = mul <32 x i16> %i, %rhs_mul = mul <32 x i16> %i, diff --git a/llvm/test/CodeGen/X86/rotate-extract.ll b/llvm/test/CodeGen/X86/rotate-extract.ll --- a/llvm/test/CodeGen/X86/rotate-extract.ll +++ b/llvm/test/CodeGen/X86/rotate-extract.ll @@ -12,13 +12,13 @@ define i64 @rolq_extract_shl(i64 %i) nounwind { ; X86-LABEL: rolq_extract_shl: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; 
X86-NEXT: leal (,%edx,8), %eax -; X86-NEXT: shldl $10, %ecx, %edx -; X86-NEXT: shll $10, %ecx -; X86-NEXT: shrl $25, %eax -; X86-NEXT: orl %ecx, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: shldl $3, %edx, %ecx +; X86-NEXT: shll $3, %edx +; X86-NEXT: movl %edx, %eax +; X86-NEXT: shldl $7, %ecx, %eax +; X86-NEXT: shrdl $25, %ecx, %edx ; X86-NEXT: retl ; ; X64-LABEL: rolq_extract_shl: @@ -105,21 +105,14 @@ define i64 @rolq_extract_mul_with_mask(i64 %i) nounwind { ; X86-LABEL: rolq_extract_mul_with_mask: ; X86: # %bb.0: -; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %eax, %ecx -; X86-NEXT: shll $7, %ecx -; X86-NEXT: leal (%ecx,%ecx,8), %ecx -; X86-NEXT: movl $9, %edx -; X86-NEXT: mull %edx -; X86-NEXT: leal (%esi,%esi,8), %eax -; X86-NEXT: addl %edx, %eax -; X86-NEXT: movzbl %cl, %ecx -; X86-NEXT: shrl $25, %eax -; X86-NEXT: orl %ecx, %eax +; X86-NEXT: leal (%eax,%eax,8), %ecx +; X86-NEXT: movl $9, %eax +; X86-NEXT: mull {{[0-9]+}}(%esp) +; X86-NEXT: addl %ecx, %edx +; X86-NEXT: shrdl $25, %eax, %edx +; X86-NEXT: movzbl %dl, %eax ; X86-NEXT: xorl %edx, %edx -; X86-NEXT: popl %esi ; X86-NEXT: retl ; ; X64-LABEL: rolq_extract_mul_with_mask: diff --git a/llvm/test/CodeGen/X86/rotate.ll b/llvm/test/CodeGen/X86/rotate.ll --- a/llvm/test/CodeGen/X86/rotate.ll +++ b/llvm/test/CodeGen/X86/rotate.ll @@ -113,11 +113,11 @@ define i64 @rotli64(i64 %A) nounwind { ; X86-LABEL: rotli64: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl %ecx, %edx -; X86-NEXT: shldl $5, %eax, %edx -; X86-NEXT: shldl $5, %ecx, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl %ecx, %eax +; X86-NEXT: shldl $5, %edx, %eax +; X86-NEXT: shldl $5, %ecx, %edx ; X86-NEXT: retl ; ; X64-LABEL: rotli64: @@ -155,11 +155,11 @@ define i64 @rotl1_64(i64 %A) nounwind { ; X86-LABEL: rotl1_64: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl %ecx, %edx -; X86-NEXT: shldl $1, %eax, %edx -; X86-NEXT: shldl $1, %ecx, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl %ecx, %eax +; X86-NEXT: shldl $1, %edx, %eax +; X86-NEXT: shldl $1, %ecx, %edx ; X86-NEXT: retl ; ; X64-LABEL: rotl1_64: @@ -569,11 +569,11 @@ ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl (%eax), %ecx ; X86-NEXT: movl 4(%eax), %edx -; X86-NEXT: movl %edx, %esi -; X86-NEXT: shldl $31, %ecx, %esi -; X86-NEXT: shldl $31, %edx, %ecx -; X86-NEXT: movl %esi, (%eax) -; X86-NEXT: movl %ecx, 4(%eax) +; X86-NEXT: movl %ecx, %esi +; X86-NEXT: shldl $31, %edx, %esi +; X86-NEXT: shldl $31, %ecx, %edx +; X86-NEXT: movl %edx, (%eax) +; X86-NEXT: movl %esi, 4(%eax) ; X86-NEXT: popl %esi ; X86-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/rotate2.ll b/llvm/test/CodeGen/X86/rotate2.ll --- a/llvm/test/CodeGen/X86/rotate2.ll +++ b/llvm/test/CodeGen/X86/rotate2.ll @@ -5,11 +5,11 @@ define i64 @test1(i64 %x) nounwind { ; X86-LABEL: test1: ; X86: # %bb.0: # %entry -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl %ecx, %edx -; X86-NEXT: shldl $9, %eax, %edx -; X86-NEXT: shldl $9, %ecx, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl %ecx, %eax +; X86-NEXT: shldl $9, %edx, %eax +; X86-NEXT: shldl $9, %ecx, %edx ; X86-NEXT: retl ; ; X64-LABEL: test1: diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll --- 
a/llvm/test/CodeGen/X86/vector-rotate-128.ll +++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll @@ -1323,13 +1323,13 @@ ; SSE2-LABEL: constant_rotate_v2i64: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: psllq $4, %xmm1 +; SSE2-NEXT: psrlq $60, %xmm1 ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: psllq $14, %xmm2 +; SSE2-NEXT: psrlq $50, %xmm2 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1] ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: psrlq $60, %xmm1 -; SSE2-NEXT: psrlq $50, %xmm0 +; SSE2-NEXT: psllq $4, %xmm1 +; SSE2-NEXT: psllq $14, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: orpd %xmm2, %xmm0 ; SSE2-NEXT: retq @@ -1337,33 +1337,33 @@ ; SSE41-LABEL: constant_rotate_v2i64: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: psllq $14, %xmm1 +; SSE41-NEXT: psrlq $50, %xmm1 ; SSE41-NEXT: movdqa %xmm0, %xmm2 -; SSE41-NEXT: psllq $4, %xmm2 +; SSE41-NEXT: psrlq $60, %xmm2 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: psrlq $50, %xmm1 -; SSE41-NEXT: psrlq $60, %xmm0 +; SSE41-NEXT: psllq $14, %xmm1 +; SSE41-NEXT: psllq $4, %xmm0 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: por %xmm2, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: constant_rotate_v2i64: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllq $14, %xmm0, %xmm1 -; AVX1-NEXT: vpsllq $4, %xmm0, %xmm2 +; AVX1-NEXT: vpsrlq $50, %xmm0, %xmm1 +; AVX1-NEXT: vpsrlq $60, %xmm0, %xmm2 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7] -; AVX1-NEXT: vpsrlq $50, %xmm0, %xmm2 -; AVX1-NEXT: vpsrlq $60, %xmm0, %xmm0 +; AVX1-NEXT: vpsllq $14, %xmm0, %xmm2 +; AVX1-NEXT: vpsllq $4, %xmm0, %xmm0 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] -; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: constant_rotate_v2i64: ; AVX2: # %bb.0: -; AVX2-NEXT: vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 -; AVX2-NEXT: vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVX2-NEXT: vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: constant_rotate_v2i64: @@ -1416,13 +1416,13 @@ ; X86-SSE2-LABEL: constant_rotate_v2i64: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE2-NEXT: psllq $4, %xmm1 +; X86-SSE2-NEXT: psrlq $60, %xmm1 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 -; X86-SSE2-NEXT: psllq $14, %xmm2 +; X86-SSE2-NEXT: psrlq $50, %xmm2 ; X86-SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1] ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE2-NEXT: psrlq $60, %xmm1 -; X86-SSE2-NEXT: psrlq $50, %xmm0 +; X86-SSE2-NEXT: psllq $4, %xmm1 +; X86-SSE2-NEXT: psllq $14, %xmm0 ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; X86-SSE2-NEXT: orpd %xmm2, %xmm0 ; X86-SSE2-NEXT: retl @@ -1723,9 +1723,9 @@ ; AVX512F-LABEL: constant_rotate_v16i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512F-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 -; 
AVX512F-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512F-NEXT: vpord %zmm0, %zmm1, %zmm0 +; AVX512F-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 +; AVX512F-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512F-NEXT: vpord %zmm1, %zmm0, %zmm0 ; AVX512F-NEXT: vpmovdb %zmm0, %xmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq @@ -1733,21 +1733,21 @@ ; AVX512VL-LABEL: constant_rotate_v16i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512VL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 -; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512VL-NEXT: vpord %zmm0, %zmm1, %zmm0 +; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 +; AVX512VL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512VL-NEXT: vpord %zmm1, %zmm0, %zmm0 ; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0 ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: constant_rotate_v16i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1] +; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7] ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1 -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7] -; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm1 +; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1] +; AVX512BW-NEXT: vpsllvw %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper @@ -1756,21 +1756,21 @@ ; AVX512VLBW-LABEL: constant_rotate_v16i8: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 -; AVX512VLBW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 -; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX512VLBW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 +; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX512VLBW-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0 ; AVX512VLBW-NEXT: vzeroupper ; AVX512VLBW-NEXT: retq ; ; AVX512VBMI2-LABEL: constant_rotate_v16i8: ; AVX512VBMI2: # %bb.0: -; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1] +; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7] ; AVX512VBMI2-NEXT: vpmovzxbw {{.*#+}} ymm0 = 
xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512VBMI2-NEXT: vpsllvw %zmm1, %zmm0, %zmm1 -; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7] -; AVX512VBMI2-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0 -; AVX512VBMI2-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX512VBMI2-NEXT: vpsrlvw %zmm1, %zmm0, %zmm1 +; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1] +; AVX512VBMI2-NEXT: vpsllvw %zmm2, %zmm0, %zmm0 +; AVX512VBMI2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512VBMI2-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512VBMI2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512VBMI2-NEXT: vzeroupper @@ -1779,9 +1779,9 @@ ; AVX512VLVBMI2-LABEL: constant_rotate_v16i8: ; AVX512VLVBMI2: # %bb.0: ; AVX512VLVBMI2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 -; AVX512VLVBMI2-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 -; AVX512VLVBMI2-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX512VLVBMI2-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 +; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX512VLVBMI2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512VLVBMI2-NEXT: vpmovwb %ymm0, %xmm0 ; AVX512VLVBMI2-NEXT: vzeroupper ; AVX512VLVBMI2-NEXT: retq @@ -1828,16 +1828,16 @@ ; SSE-LABEL: splatconstant_rotate_v2i64: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: psllq $14, %xmm1 -; SSE-NEXT: psrlq $50, %xmm0 +; SSE-NEXT: psrlq $50, %xmm1 +; SSE-NEXT: psllq $14, %xmm0 ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_rotate_v2i64: ; AVX: # %bb.0: -; AVX-NEXT: vpsllq $14, %xmm0, %xmm1 -; AVX-NEXT: vpsrlq $50, %xmm0, %xmm0 -; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vpsrlq $50, %xmm0, %xmm1 +; AVX-NEXT: vpsllq $14, %xmm0, %xmm0 +; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX512F-LABEL: splatconstant_rotate_v2i64: @@ -1887,8 +1887,8 @@ ; X86-SSE2-LABEL: splatconstant_rotate_v2i64: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE2-NEXT: psllq $14, %xmm1 -; X86-SSE2-NEXT: psrlq $50, %xmm0 +; X86-SSE2-NEXT: psrlq $50, %xmm1 +; X86-SSE2-NEXT: psllq $14, %xmm0 ; X86-SSE2-NEXT: por %xmm1, %xmm0 ; X86-SSE2-NEXT: retl %shl = shl <2 x i64> %a, @@ -2067,11 +2067,11 @@ ; ; AVX512F-LABEL: splatconstant_rotate_v16i8: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vpsllw $4, %xmm0, %xmm1 +; AVX512F-NEXT: vpsrlw $4, %xmm0, %xmm1 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX512F-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX512F-NEXT: vpsllw $4, %xmm0, %xmm0 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX512F-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX512F-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_rotate_v16i8: @@ -2083,11 +2083,11 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_v16i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpsllw $4, %xmm0, %xmm1 +; AVX512BW-NEXT: vpsrlw $4, %xmm0, %xmm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX512BW-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX512BW-NEXT: vpsllw $4, %xmm0, %xmm0 ; AVX512BW-NEXT: vpand 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_rotate_v16i8: @@ -2099,11 +2099,11 @@ ; ; AVX512VBMI2-LABEL: splatconstant_rotate_v16i8: ; AVX512VBMI2: # %bb.0: -; AVX512VBMI2-NEXT: vpsllw $4, %xmm0, %xmm1 +; AVX512VBMI2-NEXT: vpsrlw $4, %xmm0, %xmm1 ; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX512VBMI2-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX512VBMI2-NEXT: vpsllw $4, %xmm0, %xmm0 ; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX512VBMI2-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX512VBMI2-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512VBMI2-NEXT: retq ; ; AVX512VLVBMI2-LABEL: splatconstant_rotate_v16i8: @@ -2401,53 +2401,56 @@ ; ; AVX512F-LABEL: splatconstant_rotate_mask_v16i8: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vpsllw $4, %xmm0, %xmm1 -; AVX512F-NEXT: vpsrlw $4, %xmm0, %xmm0 -; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX512F-NEXT: vpsrlw $4, %xmm0, %xmm1 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX512F-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX512F-NEXT: vpsllw $4, %xmm0, %xmm0 +; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX512F-NEXT: vpor %xmm1, %xmm0, %xmm0 +; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_rotate_mask_v16i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsllw $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm2 -; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0 -; AVX512VL-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0 +; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX512VL-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0 +; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: splatconstant_rotate_mask_v16i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpsllw $4, %xmm0, %xmm1 -; AVX512BW-NEXT: vpsrlw $4, %xmm0, %xmm0 -; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX512BW-NEXT: vpsrlw $4, %xmm0, %xmm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX512BW-NEXT: vpsllw $4, %xmm0, %xmm0 +; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0 +; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_rotate_mask_v16i8: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpsllw $4, %xmm0, %xmm1 -; AVX512VLBW-NEXT: vpsrlw $4, %xmm0, %xmm2 -; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0 -; AVX512VLBW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0 +; AVX512VLBW-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX512VLBW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0 +; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VBMI2-LABEL: splatconstant_rotate_mask_v16i8: ; AVX512VBMI2: # %bb.0: -; AVX512VBMI2-NEXT: vpsllw $4, %xmm0, %xmm1 -; AVX512VBMI2-NEXT: vpsrlw $4, %xmm0, %xmm0 -; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX512VBMI2-NEXT: vpsrlw $4, %xmm0, %xmm1 ; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX512VBMI2-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX512VBMI2-NEXT: vpsllw $4, 
%xmm0, %xmm0 +; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX512VBMI2-NEXT: vpor %xmm1, %xmm0, %xmm0 +; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX512VBMI2-NEXT: retq ; ; AVX512VLVBMI2-LABEL: splatconstant_rotate_mask_v16i8: ; AVX512VLVBMI2: # %bb.0: ; AVX512VLVBMI2-NEXT: vpsllw $4, %xmm0, %xmm1 -; AVX512VLVBMI2-NEXT: vpsrlw $4, %xmm0, %xmm2 -; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0 -; AVX512VLVBMI2-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0 +; AVX512VLVBMI2-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX512VLVBMI2-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0 +; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX512VLVBMI2-NEXT: retq ; ; XOP-LABEL: splatconstant_rotate_mask_v16i8: diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll --- a/llvm/test/CodeGen/X86/vector-rotate-256.ll +++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll @@ -1050,28 +1050,28 @@ ; AVX1-LABEL: constant_rotate_v4i64: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpsllq $60, %xmm1, %xmm2 -; AVX1-NEXT: vpsllq $50, %xmm1, %xmm3 +; AVX1-NEXT: vpsrlq $4, %xmm1, %xmm2 +; AVX1-NEXT: vpsrlq $14, %xmm1, %xmm3 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] -; AVX1-NEXT: vpsllq $14, %xmm0, %xmm3 -; AVX1-NEXT: vpsllq $4, %xmm0, %xmm4 +; AVX1-NEXT: vpsrlq $50, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlq $60, %xmm0, %xmm4 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7] ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 -; AVX1-NEXT: vpsrlq $4, %xmm1, %xmm3 -; AVX1-NEXT: vpsrlq $14, %xmm1, %xmm1 +; AVX1-NEXT: vpsllq $60, %xmm1, %xmm3 +; AVX1-NEXT: vpsllq $50, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7] -; AVX1-NEXT: vpsrlq $50, %xmm0, %xmm3 -; AVX1-NEXT: vpsrlq $60, %xmm0, %xmm0 +; AVX1-NEXT: vpsllq $14, %xmm0, %xmm3 +; AVX1-NEXT: vpsllq $4, %xmm0, %xmm0 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm0 +; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: constant_rotate_v4i64: ; AVX2: # %bb.0: -; AVX2-NEXT: vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 -; AVX2-NEXT: vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 -; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 +; AVX2-NEXT: vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: constant_rotate_v4i64: @@ -1437,36 +1437,36 @@ ; AVX512BW-LABEL: constant_rotate_v32i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero -; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 -; AVX512BW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 +; AVX512BW-NEXT: 
vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: constant_rotate_v32i8: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero -; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 -; AVX512VLBW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0 +; AVX512VLBW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 +; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VBMI2-LABEL: constant_rotate_v32i8: ; AVX512VBMI2: # %bb.0: ; AVX512VBMI2-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero -; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 -; AVX512VBMI2-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512VBMI2-NEXT: vporq %zmm0, %zmm1, %zmm0 +; AVX512VBMI2-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 +; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512VBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VBMI2-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512VBMI2-NEXT: retq ; ; AVX512VLVBMI2-LABEL: constant_rotate_v32i8: ; AVX512VLVBMI2: # %bb.0: ; AVX512VLVBMI2-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero -; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 -; AVX512VLVBMI2-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512VLVBMI2-NEXT: vporq %zmm0, %zmm1, %zmm0 +; AVX512VLVBMI2-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 +; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512VLVBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLVBMI2-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512VLVBMI2-NEXT: retq ; @@ -1500,21 +1500,21 @@ define <4 x i64> @splatconstant_rotate_v4i64(<4 x i64> %a) nounwind { ; AVX1-LABEL: splatconstant_rotate_v4i64: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllq $14, %xmm0, %xmm1 +; AVX1-NEXT: vpsrlq $50, %xmm0, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpsllq $14, %xmm2, %xmm3 +; AVX1-NEXT: vpsrlq $50, %xmm2, %xmm3 ; 
AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vpsrlq $50, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlq $50, %xmm2, %xmm2 +; AVX1-NEXT: vpsllq $14, %xmm0, %xmm0 +; AVX1-NEXT: vpsllq $14, %xmm2, %xmm2 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0 +; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: splatconstant_rotate_v4i64: ; AVX2: # %bb.0: -; AVX2-NEXT: vpsllq $14, %ymm0, %ymm1 -; AVX2-NEXT: vpsrlq $50, %ymm0, %ymm0 -; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vpsrlq $50, %ymm0, %ymm1 +; AVX2-NEXT: vpsllq $14, %ymm0, %ymm0 +; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: splatconstant_rotate_v4i64: @@ -1777,11 +1777,11 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_v32i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpsllw $4, %ymm0, %ymm1 +; AVX512BW-NEXT: vpsrlw $4, %ymm0, %ymm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512BW-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512BW-NEXT: vpsllw $4, %ymm0, %ymm0 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 -; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_rotate_v32i8: @@ -1793,11 +1793,11 @@ ; ; AVX512VBMI2-LABEL: splatconstant_rotate_v32i8: ; AVX512VBMI2: # %bb.0: -; AVX512VBMI2-NEXT: vpsllw $4, %ymm0, %ymm1 +; AVX512VBMI2-NEXT: vpsrlw $4, %ymm0, %ymm1 ; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512VBMI2-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512VBMI2-NEXT: vpsllw $4, %ymm0, %ymm0 ; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 -; AVX512VBMI2-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX512VBMI2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512VBMI2-NEXT: retq ; ; AVX512VLVBMI2-LABEL: splatconstant_rotate_v32i8: @@ -2137,36 +2137,38 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_mask_v32i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpsllw $4, %ymm0, %ymm1 -; AVX512BW-NEXT: vpsrlw $4, %ymm0, %ymm0 -; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX512BW-NEXT: vpsrlw $4, %ymm0, %ymm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX512BW-NEXT: vpsllw $4, %ymm0, %ymm0 +; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_rotate_mask_v32i8: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpsllw $4, %ymm0, %ymm1 -; AVX512VLBW-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0 -; AVX512VLBW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0 +; AVX512VLBW-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512VLBW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0 +; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VBMI2-LABEL: splatconstant_rotate_mask_v32i8: ; AVX512VBMI2: # %bb.0: -; AVX512VBMI2-NEXT: vpsllw $4, %ymm0, %ymm1 -; AVX512VBMI2-NEXT: vpsrlw $4, %ymm0, %ymm0 -; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX512VBMI2-NEXT: vpsrlw $4, %ymm0, %ymm1 ; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512VBMI2-NEXT: vpor %ymm0, %ymm1, %ymm0 +; AVX512VBMI2-NEXT: vpsllw $4, %ymm0, %ymm0 +; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), 
%ymm0, %ymm0 +; AVX512VBMI2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX512VBMI2-NEXT: retq ; ; AVX512VLVBMI2-LABEL: splatconstant_rotate_mask_v32i8: ; AVX512VLVBMI2: # %bb.0: ; AVX512VLVBMI2-NEXT: vpsllw $4, %ymm0, %ymm1 -; AVX512VLVBMI2-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0 -; AVX512VLVBMI2-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0 +; AVX512VLVBMI2-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512VLVBMI2-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0 +; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX512VLVBMI2-NEXT: retq ; ; XOPAVX1-LABEL: splatconstant_rotate_mask_v32i8: diff --git a/llvm/test/CodeGen/X86/vector-rotate-512.ll b/llvm/test/CodeGen/X86/vector-rotate-512.ll --- a/llvm/test/CodeGen/X86/vector-rotate-512.ll +++ b/llvm/test/CodeGen/X86/vector-rotate-512.ll @@ -689,16 +689,16 @@ ; ; AVX512BW-LABEL: constant_rotate_v32i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 -; AVX512BW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 +; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: constant_rotate_v32i16: ; AVX512VLBW: # %bb.0: -; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 -; AVX512VLBW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0 +; AVX512VLBW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 +; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VBMI2-LABEL: constant_rotate_v32i16: @@ -723,7 +723,7 @@ ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm2 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2 -; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256] +; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256] ; AVX512F-NEXT: # ymm4 = mem[0,1,0,1] ; AVX512F-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm2 ; AVX512F-NEXT: vpsllw $2, %ymm2, %ymm5 @@ -750,7 +750,7 @@ ; AVX512F-NEXT: vpmullw %ymm5, %ymm4, %ymm4 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[4],ymm3[4],ymm1[5],ymm3[5],ymm1[6],ymm3[6],ymm1[7],ymm3[7],ymm1[16],ymm3[16],ymm1[17],ymm3[17],ymm1[18],ymm3[18],ymm1[19],ymm3[19],ymm1[20],ymm3[20],ymm1[21],ymm3[21],ymm1[22],ymm3[22],ymm1[23],ymm3[23] -; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128] +; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128] ; AVX512F-NEXT: # ymm6 = mem[0,1,0,1] ; AVX512F-NEXT: vpmullw %ymm6, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1 @@ -772,7 +772,7 @@ ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm2 ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = 
[240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2 -; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256] +; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256] ; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1] ; AVX512VL-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm2 ; AVX512VL-NEXT: vpsllw $2, %ymm2, %ymm5 @@ -799,7 +799,7 @@ ; AVX512VL-NEXT: vpmullw %ymm5, %ymm4, %ymm4 ; AVX512VL-NEXT: vpsrlw $8, %ymm4, %ymm4 ; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[4],ymm3[4],ymm1[5],ymm3[5],ymm1[6],ymm3[6],ymm1[7],ymm3[7],ymm1[16],ymm3[16],ymm1[17],ymm3[17],ymm1[18],ymm3[18],ymm1[19],ymm3[19],ymm1[20],ymm3[20],ymm1[21],ymm3[21],ymm1[22],ymm3[22],ymm1[23],ymm3[23] -; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128] +; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128] ; AVX512VL-NEXT: # ymm6 = mem[0,1,0,1] ; AVX512VL-NEXT: vpmullw %ymm6, %ymm1, %ymm1 ; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1 @@ -817,7 +817,7 @@ ; ; AVX512BW-LABEL: constant_rotate_v64i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256] +; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256] ; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; AVX512BW-NEXT: vpmovb2m %zmm1, %k1 ; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2 @@ -844,7 +844,7 @@ ; ; AVX512VLBW-LABEL: constant_rotate_v64i8: ; AVX512VLBW: # %bb.0: -; AVX512VLBW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256] +; AVX512VLBW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256] ; AVX512VLBW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1 ; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm2 @@ -871,7 +871,7 @@ ; ; AVX512VBMI2-LABEL: constant_rotate_v64i8: ; AVX512VBMI2: # %bb.0: -; AVX512VBMI2-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256] +; AVX512VBMI2-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256] ; AVX512VBMI2-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; AVX512VBMI2-NEXT: vpmovb2m %zmm1, %k1 ; AVX512VBMI2-NEXT: vpsllw $4, %zmm0, %zmm2 @@ -898,7 +898,7 @@ ; ; AVX512VLVBMI2-LABEL: constant_rotate_v64i8: ; AVX512VLVBMI2: # %bb.0: -; 
AVX512VLVBMI2-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256] +; AVX512VLVBMI2-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256] ; AVX512VLVBMI2-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; AVX512VLVBMI2-NEXT: vpmovb2m %zmm1, %k1 ; AVX512VLVBMI2-NEXT: vpsllw $4, %zmm0, %zmm2 @@ -981,16 +981,16 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_v32i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm1 -; AVX512BW-NEXT: vpsrlw $9, %zmm0, %zmm0 -; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: vpsrlw $9, %zmm0, %zmm1 +; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_rotate_v32i16: ; AVX512VLBW: # %bb.0: -; AVX512VLBW-NEXT: vpsllw $7, %zmm0, %zmm1 -; AVX512VLBW-NEXT: vpsrlw $9, %zmm0, %zmm0 -; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0 +; AVX512VLBW-NEXT: vpsrlw $9, %zmm0, %zmm1 +; AVX512VLBW-NEXT: vpsllw $7, %zmm0, %zmm0 +; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VBMI2-LABEL: splatconstant_rotate_v32i16: @@ -1126,17 +1126,15 @@ ; AVX512BW-LABEL: splatconstant_rotate_mask_v32i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpsllw $5, %zmm0, %zmm1 -; AVX512BW-NEXT: vpsrlw $11, %zmm0, %zmm2 -; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 -; AVX512BW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; AVX512BW-NEXT: vpsrlw $11, %zmm0, %zmm0 +; AVX512BW-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_rotate_mask_v32i16: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpsllw $5, %zmm0, %zmm1 -; AVX512VLBW-NEXT: vpsrlw $11, %zmm0, %zmm2 -; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 -; AVX512VLBW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; AVX512VLBW-NEXT: vpsrlw $11, %zmm0, %zmm0 +; AVX512VLBW-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VBMI2-LABEL: splatconstant_rotate_mask_v32i16: @@ -1167,9 +1165,9 @@ ; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1 ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $4, %ymm2, %ymm2 -; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm2 -; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 -; AVX512F-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512F-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 +; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_rotate_mask_v64i8: @@ -1180,41 +1178,41 @@ ; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1 ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm2 -; AVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm2 -; AVX512VL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 -; AVX512VL-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; AVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512VL-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, 
%zmm0 +; AVX512VL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: splatconstant_rotate_mask_v64i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1 -; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm2 -; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 -; AVX512BW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 +; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_rotate_mask_v64i8: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1 -; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm2 -; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 -; AVX512VLBW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512VLBW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 +; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VBMI2-LABEL: splatconstant_rotate_mask_v64i8: ; AVX512VBMI2: # %bb.0: ; AVX512VBMI2-NEXT: vpsllw $4, %zmm0, %zmm1 -; AVX512VBMI2-NEXT: vpsrlw $4, %zmm0, %zmm2 -; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 -; AVX512VBMI2-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; AVX512VBMI2-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512VBMI2-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 +; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 ; AVX512VBMI2-NEXT: retq ; ; AVX512VLVBMI2-LABEL: splatconstant_rotate_mask_v64i8: ; AVX512VLVBMI2: # %bb.0: ; AVX512VLVBMI2-NEXT: vpsllw $4, %zmm0, %zmm1 -; AVX512VLVBMI2-NEXT: vpsrlw $4, %zmm0, %zmm2 -; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 -; AVX512VLVBMI2-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; AVX512VLVBMI2-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512VLVBMI2-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0 +; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 ; AVX512VLVBMI2-NEXT: retq %shl = shl <64 x i8> %a, %lshr = lshr <64 x i8> %a,
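For reference, the shift/or idiom that the DAGCombiner.cpp change above now canonicalizes even before legalization is the same shape exercised by tests such as grev16_i32 in rv32zbp.ll/rv64zbp.ll. A minimal stand-alone sketch in LLVM IR (hypothetical function name, not part of the patch):

; Rotate-left-by-16 written as shifts plus an or. With this change the
; combiner forms an ISD::ROTL node even pre-legalization; targets without a
; rotate instruction simply expand it back to shifts, while e.g. RV64 with
; Zbp now selects roriw for this pattern (see the rv64zbp.ll update above).
define i32 @rot16_sketch(i32 %a) {
  %shl = shl i32 %a, 16
  %shr = lshr i32 %a, 16
  %or = or i32 %shl, %shr
  ret i32 %or
}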
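The added early-exit is mainly about variable rotate amounts: when the target has none of ROTL/ROTR/FSHL/FSHR, a pattern like the sketch below is still left untouched by this combine, matching the pre-patch behavior. Again a hypothetical example, not part of the patch:

; Variable-amount rotate idiom: (x << n) | (x >> (32 - n)). Without any
; native rotate or funnel-shift support, the new pre-legalization check
; bails out before the variable-amount path, so the shifts and the or are
; kept as-is.
define i32 @rot_var_sketch(i32 %x, i32 %n) {
  %sub = sub i32 32, %n
  %shl = shl i32 %x, %n
  %shr = lshr i32 %x, %sub
  %or = or i32 %shl, %shr
  ret i32 %or
}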