Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6397,10 +6397,7 @@
   MachineFunction &MF = getMachineFunction();
 
-  // FIXME: Volatile isn't really correct; we should keep track of atomic
-  // orderings in the memoperand.
-  auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad |
-               MachineMemOperand::MOStore;
+  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
   MachineMemOperand *MMO =
     MF.getMachineMemOperand(PtrInfo, Flags,
                             MemVT.getStoreSize(), Alignment,
                             AAMDNodes(), nullptr, SSID, SuccessOrdering,
@@ -6432,11 +6429,7 @@
   MachineFunction &MF = getMachineFunction();
   // An atomic store does not load. An atomic load does not store.
   // (An atomicrmw obviously both loads and stores.)
-  // For now, atomics are considered to be volatile always, and they are
-  // chained as such.
-  // FIXME: Volatile isn't really correct; we should keep track of atomic
-  // orderings in the memoperand.
-  auto Flags = MachineMemOperand::MOVolatile;
+  auto Flags = MachineMemOperand::MONone;
   if (Opcode != ISD::ATOMIC_STORE)
     Flags |= MachineMemOperand::MOLoad;
   if (Opcode != ISD::ATOMIC_LOAD)
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4278,6 +4278,7 @@
   AtomicOrdering Order = I.getOrdering();
   SyncScope::ID SSID = I.getSyncScopeID();
 
+  // FIXME: The chaining of atomic loads is unnecessarily conservative
   SDValue InChain = getRoot();
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -4290,7 +4291,6 @@
   MachineMemOperand *MMO =
       DAG.getMachineFunction().
           getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
-                               MachineMemOperand::MOVolatile |
                                MachineMemOperand::MOLoad,
                                VT.getStoreSize(),
                                I.getAlignment() ? I.getAlignment() :
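Note: the sync scope and ordering are already threaded into the memoperand (the SSID, SuccessOrdering, and Order arguments above), so dropping MOVolatile loses no information; atomicity and volatility simply become independently queryable properties. As a rough sketch of what a consumer can now do (the helper below is hypothetical and not part of this patch; isVolatile() and getOrdering() are the existing MachineMemOperand accessors):

#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace llvm;

// Hypothetical sketch: with MOVolatile no longer standing in for "atomic",
// a pass can permit folding of relaxed atomics while still rejecting
// genuinely volatile accesses and stronger orderings.
static bool mayFoldMemOp(const MachineMemOperand &MMO) {
  if (MMO.isVolatile())
    return false; // volatile accesses must be emitted exactly as written
  AtomicOrdering AO = MMO.getOrdering();
  return AO == AtomicOrdering::NotAtomic ||
         AO == AtomicOrdering::Unordered ||
         AO == AtomicOrdering::Monotonic;
}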
Index: test/CodeGen/X86/atomic-non-integer.ll
===================================================================
--- test/CodeGen/X86/atomic-non-integer.ll
+++ test/CodeGen/X86/atomic-non-integer.ll
@@ -62,8 +62,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    movzwl (%rdi), %eax
-; CHECK-NEXT:    movzwl %ax, %edi
+; CHECK-NEXT:    movzwl (%rdi), %edi
 ; CHECK-NEXT:    callq __gnu_h2f_ieee
 ; CHECK-NEXT:    popq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
@@ -75,8 +74,7 @@
 define float @load_float(float* %fptr) {
 ; CHECK-LABEL: load_float:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl (%rdi), %eax
-; CHECK-NEXT:    movd %eax, %xmm0
+; CHECK-NEXT:    movd (%rdi), %xmm0
 ; CHECK-NEXT:    retq
   %v = load atomic float, float* %fptr unordered, align 4
   ret float %v
@@ -85,8 +83,7 @@
 define double @load_double(double* %fptr) {
 ; CHECK-LABEL: load_double:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    movq %rax, %xmm0
+; CHECK-NEXT:    movq (%rdi), %xmm0
 ; CHECK-NEXT:    retq
   %v = load atomic double, double* %fptr unordered, align 8
   ret double %v
@@ -136,8 +133,7 @@
 define float @load_float_seq_cst(float* %fptr) {
 ; CHECK-LABEL: load_float_seq_cst:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl (%rdi), %eax
-; CHECK-NEXT:    movd %eax, %xmm0
+; CHECK-NEXT:    movd (%rdi), %xmm0
 ; CHECK-NEXT:    retq
   %v = load atomic float, float* %fptr seq_cst, align 4
   ret float %v
@@ -146,8 +142,7 @@
 define double @load_double_seq_cst(double* %fptr) {
 ; CHECK-LABEL: load_double_seq_cst:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    movq %rax, %xmm0
+; CHECK-NEXT:    movq (%rdi), %xmm0
 ; CHECK-NEXT:    retq
   %v = load atomic double, double* %fptr seq_cst, align 8
   ret double %v
Index: test/CodeGen/X86/atomic_mi.ll
===================================================================
--- test/CodeGen/X86/atomic_mi.ll
+++ test/CodeGen/X86/atomic_mi.ll
@@ -435,9 +435,8 @@
 define void @add_32r_seq_cst(i32* %p, i32 %v) {
 ; X64-LABEL: add_32r_seq_cst:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    addl %esi, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
+; X64-NEXT:    addl (%rdi), %esi
+; X64-NEXT:    xchgl %esi, (%rdi)
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: add_32r_seq_cst:
@@ -515,14 +514,12 @@
 define void @sub_32r_self(i32* %p) {
 ; X64-LABEL: sub_32r_self:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    movl $0, (%rdi)
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: sub_32r_self:
 ; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
 ; X32-NEXT:    movl $0, (%eax)
 ; X32-NEXT:    retl
   %1 = load atomic i32, i32* %p acquire, align 4
@@ -840,9 +837,8 @@
 define void @and_32r_seq_cst(i32* %p, i32 %v) {
 ; X64-LABEL: and_32r_seq_cst:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    andl %esi, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
+; X64-NEXT:    andl (%rdi), %esi
+; X64-NEXT:    xchgl %esi, (%rdi)
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: and_32r_seq_cst:
@@ -1076,9 +1072,8 @@
 define void @or_32r_seq_cst(i32* %p, i32 %v) {
 ; X64-LABEL: or_32r_seq_cst:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    orl %esi, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
+; X64-NEXT:    orl (%rdi), %esi
+; X64-NEXT:    xchgl %esi, (%rdi)
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: or_32r_seq_cst:
@@ -1312,9 +1307,8 @@
 define void @xor_32r_seq_cst(i32* %p, i32 %v) {
 ; X64-LABEL: xor_32r_seq_cst:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    xorl %esi, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
+; X64-NEXT:    xorl (%rdi), %esi
+; X64-NEXT:    xchgl %esi, (%rdi)
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: xor_32r_seq_cst:
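The test deltas above come from existing folding logic rather than any new combine: instruction selection refuses to fold a load into its user while the load is marked volatile, so atomics that were blanket-marked MOVolatile could never fold into movd/movq or into RMW arithmetic. On x86 even a seq_cst atomic load is a plain load (ordering is enforced on the store side, hence the xchgl), which is why the seq_cst cases fold identically to the unordered ones. A minimal sketch of the guard pattern involved, with a hypothetical helper name (hasOneUse() and isVolatile() are real SDNode/MemSDNode APIs):

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Hypothetical illustration of the single-use / non-volatile check that
// gates load folding during instruction selection. After this patch an
// atomic load passes the isVolatile() test unless the IR access was
// itself volatile.
static bool mayFoldIntoUser(const MemSDNode *N) {
  return N->hasOneUse() && !N->isVolatile();
}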