Index: llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2643,6 +2643,21 @@
 void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
                                          SDValue &Lo, SDValue &Hi) {
+  if (N->isAtomic()) {
+    // It's typical to have larger CAS than atomic load instructions.
+    SDLoc dl(N);
+    EVT VT = N->getMemoryVT();
+    SDVTList VTs = DAG.getVTList(VT, MVT::i1, MVT::Other);
+    SDValue Zero = DAG.getConstant(0, dl, VT);
+    SDValue Swap = DAG.getAtomicCmpSwap(
+        ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl,
+        VT, VTs, N->getOperand(0),
+        N->getOperand(1), Zero, Zero, N->getMemOperand());
+    ReplaceValueWith(SDValue(N, 0), Swap.getValue(0));
+    ReplaceValueWith(SDValue(N, 1), Swap.getValue(2));
+    return;
+  }
+
   if (ISD::isNormalLoad(N)) {
     ExpandRes_NormalLoad(N, Lo, Hi);
     return;
   }
@@ -3889,6 +3904,16 @@
 }
 
 SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
+  if (N->isAtomic()) {
+    // It's typical to have larger CAS than atomic store instructions.
+    SDLoc dl(N);
+    SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
+                                 N->getMemoryVT(),
+                                 N->getOperand(0), N->getOperand(2),
+                                 N->getOperand(1),
+                                 N->getMemOperand());
+    return Swap.getValue(1);
+  }
   if (ISD::isNormalStore(N))
     return ExpandOp_NormalStore(N, OpNo);
 
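Both hunks above lean on the same equivalence, in opposite directions. A compare-and-swap whose expected and new values are both zero behaves as an atomic load: it either observes a non-zero value and fails (returning that value), or observes zero and "stores" zero back, which is a no-op. Dually, an atomic exchange whose result is discarded behaves as an atomic store. A minimal standalone C++ sketch of the trick, assuming nothing beyond <atomic> (the helper names are invented for illustration; this is not LLVM API):

    #include <atomic>

    // Atomic load built from CAS, mirroring the ExpandIntRes_LOAD hunk:
    // the patch's `Zero` constant plays both the expected and the new value.
    template <typename T>
    T atomic_load_via_cas(std::atomic<T> &loc) {
      T expected{};
      // On failure, compare_exchange_strong writes the value it observed
      // into `expected`; on success the value was already zero.
      loc.compare_exchange_strong(expected, T{});
      return expected;
    }

    // Atomic store built from swap, mirroring the ExpandIntOp_STORE hunk.
    template <typename T>
    void atomic_store_via_swap(std::atomic<T> &loc, T v) {
      (void)loc.exchange(v);  // the previous value is simply discarded
    }

This is what the "larger CAS than atomic load instructions" comments refer to: x86-64, for instance, has cmpxchg16b but no ordinary 16-byte load or store that the base ISA guarantees to be atomic, so expanding through CAS/swap keeps a too-wide atomic access atomic instead of splitting it into two halves.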
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -248,6 +248,7 @@
   SDLoc dl(N);
 
   LoadSDNode *LD = cast<LoadSDNode>(N);
+  assert(!LD->isAtomic() && "Atomics can not be split");
   EVT ValueVT = LD->getValueType(0);
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
   SDValue Chain = LD->getChain();
@@ -459,6 +460,7 @@
   SDLoc dl(N);
 
   StoreSDNode *St = cast<StoreSDNode>(N);
+  assert(!St->isAtomic() && "Atomics can not be split");
   EVT ValueVT = St->getValue().getValueType();
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
   SDValue Chain = St->getChain();
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -87,7 +87,7 @@
     cl::Hidden);
 
 static cl::opt<bool> ExperimentalUnorderedISEL(
-    "x86-experimental-unordered-atomic-isel", cl::init(false),
+    "x86-experimental-unordered-atomic-isel", cl::init(true),
     cl::desc("Use LoadSDNode and StoreSDNode instead of "
              "AtomicSDNode for unordered atomic loads and "
              "stores respectively."),
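With the default flipped to true, unordered atomic loads and stores on x86 are now built as ordinary LoadSDNode/StoreSDNode, so the regular load/store selection patterns apply to them; most of the test churn below is this change becoming visible. For readers mapping this back to source level: LLVM's `unordered` has no exact C++ spelling (it is weaker than anything the C++ memory model exposes; `memory_order_relaxed`, LLVM's `monotonic`, is the nearest analogue), but the property being preserved is the same one. A standalone sketch of that property, using relaxed as the stand-in:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint64_t> shared{0};

    void writer(uint64_t v) {
      // A single indivisible 64-bit store; on x86-64 this can select
      // to one plain mov once it is modeled as a normal StoreSDNode.
      shared.store(v, std::memory_order_relaxed);
    }

    uint64_t reader() {
      // Never observes a torn (half-written) value, even though no
      // ordering with other memory locations is implied.
      return shared.load(std::memory_order_relaxed);
    }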
Index: llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
===================================================================
--- llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
+++ llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
@@ -21,14 +21,7 @@
 ;
 ; X64-SSE-LABEL: store_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    subq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
-; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
-; X64-SSE-NEXT:    movq (%rsp), %rsi
-; X64-SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; X64-SSE-NEXT:    callq __sync_lock_test_and_set_16
-; X64-SSE-NEXT:    addq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
 ; X64-SSE-NEXT:    retq
   store atomic fp128 %v, fp128* %fptr unordered, align 16
   ret void
Index: llvm/test/CodeGen/X86/atomic-non-integer.ll
===================================================================
--- llvm/test/CodeGen/X86/atomic-non-integer.ll
+++ llvm/test/CodeGen/X86/atomic-non-integer.ll
@@ -114,12 +114,26 @@
 }
 
 define void @store_float(float* %fptr, float %v) {
-; X86-LABEL: store_float:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, (%eax)
-; X86-NEXT:    retl
+; X86-SSE-LABEL: store_float:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movss %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: store_float:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovss %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X86-NOSSE-LABEL: store_float:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    fstps (%eax)
+; X86-NOSSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: store_float:
 ; X64-SSE:       # %bb.0:
@@ -162,16 +176,16 @@
 ;
 ; X86-SSE2-LABEL: store_double:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT:    movlps %xmm0, (%eax)
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movsd %xmm0, (%eax)
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: store_double:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT:    vmovlps %xmm0, (%eax)
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd %xmm0, (%eax)
 ; X86-AVX-NEXT:    retl
 ;
 ; X86-NOSSE-LABEL: store_double:
@@ -276,26 +290,12 @@
 ;
 ; X64-SSE-LABEL: store_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    subq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
-; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
-; X64-SSE-NEXT:    movq (%rsp), %rsi
-; X64-SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; X64-SSE-NEXT:    callq __sync_lock_test_and_set_16
-; X64-SSE-NEXT:    addq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: store_fp128:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    subq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 32
-; X64-AVX-NEXT:    vmovaps %xmm0, (%rsp)
-; X64-AVX-NEXT:    movq (%rsp), %rsi
-; X64-AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; X64-AVX-NEXT:    callq __sync_lock_test_and_set_16
-; X64-AVX-NEXT:    addq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
 ; X64-AVX-NEXT:    retq
   store atomic fp128 %v, fp128* %fptr unordered, align 16
   ret void
@@ -383,53 +383,11 @@
 }
 
 define float @load_float(float* %fptr) {
-; X86-SSE1-LABEL: load_float:
-; X86-SSE1:       # %bb.0:
-; X86-SSE1-NEXT:    pushl %eax
-; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT:    movl (%eax), %eax
-; X86-SSE1-NEXT:    movl %eax, (%esp)
-; X86-SSE1-NEXT:    flds (%esp)
-; X86-SSE1-NEXT:    popl %eax
-; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE1-NEXT:    retl
-;
-; X86-SSE2-LABEL: load_float:
-; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pushl %eax
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 8
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE2-NEXT:    movss %xmm0, (%esp)
-; X86-SSE2-NEXT:    flds (%esp)
-; X86-SSE2-NEXT:    popl %eax
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE2-NEXT:    retl
-;
-; X86-AVX-LABEL: load_float:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    pushl %eax
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX-NEXT:    flds (%esp)
-; X86-AVX-NEXT:    popl %eax
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
-; X86-AVX-NEXT:    retl
-;
-; X86-NOSSE-LABEL: load_float:
-; X86-NOSSE:       # %bb.0:
-; X86-NOSSE-NEXT:    pushl %eax
-; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    movl (%eax), %eax
-; X86-NOSSE-NEXT:    movl %eax, (%esp)
-; X86-NOSSE-NEXT:    flds (%esp)
-; X86-NOSSE-NEXT:    popl %eax
-; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
-; X86-NOSSE-NEXT:    retl
+; X86-LABEL: load_float:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    flds (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-SSE-LABEL: load_float:
 ; X64-SSE:       # %bb.0:
@@ -445,61 +403,11 @@
 }
 
 define double @load_double(double* %fptr) {
-; X86-SSE1-LABEL: load_double:
-; X86-SSE1:       # %bb.0:
-; X86-SSE1-NEXT:    subl $20, %esp
-; X86-SSE1-NEXT:    .cfi_def_cfa_offset 24
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT:    fildll (%eax)
-; X86-SSE1-NEXT:    fistpll {{[0-9]+}}(%esp)
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE1-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE1-NEXT:    movl %eax, (%esp)
-; X86-SSE1-NEXT:    fldl (%esp)
-; X86-SSE1-NEXT:    addl $20, %esp
-; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE1-NEXT:    retl
-;
-; X86-SSE2-LABEL: load_double:
-; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    subl $12, %esp
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 16
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT:    movlps %xmm0, (%esp)
-; X86-SSE2-NEXT:    fldl (%esp)
-; X86-SSE2-NEXT:    addl $12, %esp
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE2-NEXT:    retl
-;
-; X86-AVX-LABEL: load_double:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    subl $12, %esp
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 16
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT:    vmovlps %xmm0, (%esp)
-; X86-AVX-NEXT:    fldl (%esp)
-; X86-AVX-NEXT:    addl $12, %esp
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
-; X86-AVX-NEXT:    retl
-;
-; X86-NOSSE-LABEL: load_double:
-; X86-NOSSE:       # %bb.0:
-; X86-NOSSE-NEXT:    subl $20, %esp
-; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 24
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    fildll (%eax)
-; X86-NOSSE-NEXT:    fistpll {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NOSSE-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    movl %eax, (%esp)
-; X86-NOSSE-NEXT:    fldl (%esp)
-; X86-NOSSE-NEXT:    addl $20, %esp
-; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
-; X86-NOSSE-NEXT:    retl
+; X86-LABEL: load_double:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    fldl (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-SSE-LABEL: load_double:
 ; X64-SSE:       # %bb.0:
@@ -557,10 +465,10 @@
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-SSE-NEXT:    movl %edi, 8(%esi)
-; X86-SSE-NEXT:    movl %edx, 12(%esi)
-; X86-SSE-NEXT:    movl %eax, (%esi)
+; X86-SSE-NEXT:    movl %edi, 12(%esi)
+; X86-SSE-NEXT:    movl %edx, 8(%esi)
 ; X86-SSE-NEXT:    movl %ecx, 4(%esi)
+; X86-SSE-NEXT:    movl %eax, (%esi)
 ; X86-SSE-NEXT:    movl %esi, %eax
 ; X86-SSE-NEXT:    addl $20, %esp
 ; X86-SSE-NEXT:    .cfi_def_cfa_offset 12
@@ -638,10 +546,10 @@
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NOSSE-NEXT:    movl %edi, 8(%esi)
-; X86-NOSSE-NEXT:    movl %edx, 12(%esi)
-; X86-NOSSE-NEXT:    movl %eax, (%esi)
+; X86-NOSSE-NEXT:    movl %edi, 12(%esi)
+; X86-NOSSE-NEXT:    movl %edx, 8(%esi)
 ; X86-NOSSE-NEXT:    movl %ecx, 4(%esi)
+; X86-NOSSE-NEXT:    movl %eax, (%esi)
 ; X86-NOSSE-NEXT:    movl %esi, %eax
 ; X86-NOSSE-NEXT:    addl $20, %esp
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 12
@@ -653,34 +561,12 @@
 ;
 ; X64-SSE-LABEL: load_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    subq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
-; X64-SSE-NEXT:    xorl %esi, %esi
-; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    xorl %ecx, %ecx
-; X64-SSE-NEXT:    xorl %r8d, %r8d
-; X64-SSE-NEXT:    callq __sync_val_compare_and_swap_16
-; X64-SSE-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
-; X64-SSE-NEXT:    movq %rax, (%rsp)
-; X64-SSE-NEXT:    movaps (%rsp), %xmm0
-; X64-SSE-NEXT:    addq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    movaps (%rdi), %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: load_fp128:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    subq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 32
-; X64-AVX-NEXT:    xorl %esi, %esi
-; X64-AVX-NEXT:    xorl %edx, %edx
-; X64-AVX-NEXT:    xorl %ecx, %ecx
-; X64-AVX-NEXT:    xorl %r8d, %r8d
-; X64-AVX-NEXT:    callq __sync_val_compare_and_swap_16
-; X64-AVX-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
-; X64-AVX-NEXT:    movq %rax, (%rsp)
-; X64-AVX-NEXT:    vmovaps (%rsp), %xmm0
-; X64-AVX-NEXT:    addq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX-NEXT:    vmovaps (%rdi), %xmm0
 ; X64-AVX-NEXT:    retq
   %v = load atomic fp128, fp128* %fptr unordered, align 16
   ret fp128 %v
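The float and double diffs above all follow one pattern: the old lowering performed the atomic access in an integer register to guarantee a single memory operation, then bounced the bits through a stack slot to reach an FP register; the new lowering simply emits the FP load or store directly (flds/fstps, movss/movsd, movaps). A sketch of the removed shape in portable C++, assuming only the standard library (the function is illustrative, not generated code):

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    // Roughly what the removed 32-bit sequences did for `load atomic float`:
    // one atomic 32-bit integer load, then a bit-copy into a float.
    float old_style_atomic_load_float(const std::atomic<uint32_t> &bits) {
      uint32_t raw = bits.load(std::memory_order_relaxed); // movl (%eax), %eax
      float f;
      std::memcpy(&f, &raw, sizeof f);                     // the stack bounce
      return f;
    }

Dropping the bounce relies on an aligned FP load or store of the right width itself being a single, non-torn memory access on the targets in question, which is all an unordered atomic requires.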
Index: llvm/test/CodeGen/X86/atomic-unordered.ll
===================================================================
--- llvm/test/CodeGen/X86/atomic-unordered.ll
+++ llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-CUR %s
-; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-CUR %s
-; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-EX %s
-; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-EX %s
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=0 | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-CUR %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=0 | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-CUR %s
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=1 | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-EX %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=1 | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-EX %s
 
 define i8 @load_i8(i8* %ptr) {
 ; CHECK-LABEL: load_i8:
Index: llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
===================================================================
--- llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
+++ llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
@@ -19,11 +19,11 @@
 ; CHECK-NEXT:    callq newarray
 ; CHECK-NEXT:  .Ltmp0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    addss (%rax), %xmm0
 ; CHECK-NEXT:    movdqu (%rax), %xmm1
 ; CHECK-NEXT:    pcmpeqd %xmm2, %xmm2
 ; CHECK-NEXT:    psubd %xmm2, %xmm1
 ; CHECK-NEXT:    movdqu %xmm1, (%rax)
+; CHECK-NEXT:    addss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    movss %xmm0, (%rax)
 bci_0:
   %token418 = call token (i64, i32, i8 * (i64, i32, i32, i32)*, i32,